diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/AclTestHelpers.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/AclTestHelpers.java
index 646e80083ddc3..c5de4d4130168 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/AclTestHelpers.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/AclTestHelpers.java
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.*;
-
 import java.io.IOException;
 
 import org.apache.hadoop.fs.FileStatus;
@@ -33,6 +31,9 @@
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.fail;
+
 /**
  * Helper methods useful for writing ACL tests.
  */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
index 5b1f6e4d4d6ec..33336ed4f8df5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
@@ -22,7 +22,16 @@
 import static org.apache.hadoop.fs.permission.AclEntryScope.*;
 import static org.apache.hadoop.fs.permission.AclEntryType.*;
 import static org.apache.hadoop.fs.permission.FsAction.*;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNotSame;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertSame;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -50,13 +59,10 @@
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Lists;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
 
@@ -81,9 +87,6 @@ public abstract class FSAclBaseTest {
   private static int pathCount = 0;
   private static Path path;
 
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
-
   private FileSystem fs, fsAsBruce, fsAsDiana, fsAsSupergroupMember, fsAsBob;
 
   protected static void startCluster() throws IOException {
@@ -92,21 +95,21 @@ protected static void startCluster() throws IOException {
     cluster.waitActive();
   }
 
-  @AfterClass
+  @AfterAll
   public static void shutdown() {
     if (cluster != null) {
       cluster.shutdown();
     }
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws
Exception { pathCount += 1; path = new Path("/p" + pathCount); initFileSystems(); } - @After + @AfterEach public void destroyFileSystems() { IOUtils.cleanupWithLogger(null, fs, fsAsBruce, fsAsDiana, fsAsSupergroupMember); @@ -123,26 +126,23 @@ public void testModifyAclEntries() throws IOException { aclEntry(ACCESS, OTHER, NONE), aclEntry(DEFAULT, USER, "foo", ALL)); fs.setAcl(path, aclSpec); - Assert.assertTrue(path + " should have ACLs in FileStatus!", - fs.getFileStatus(path).hasAcl()); + assertTrue(fs.getFileStatus(path).hasAcl(), + path + " should have ACLs in FileStatus!"); aclSpec = Lists.newArrayList( aclEntry(ACCESS, USER, "foo", READ_EXECUTE), aclEntry(DEFAULT, USER, "foo", READ_EXECUTE)); fs.modifyAclEntries(path, aclSpec); - Assert.assertTrue(path + " should have ACLs in FileStatus!", - fs.getFileStatus(path).hasAcl()); + assertTrue(fs.getFileStatus(path).hasAcl(), + path + " should have ACLs in FileStatus!"); AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(ACCESS, USER, "foo", READ_EXECUTE), - aclEntry(ACCESS, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, USER, ALL), - aclEntry(DEFAULT, USER, "foo", READ_EXECUTE), - aclEntry(DEFAULT, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, MASK, READ_EXECUTE), - aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(ACCESS, USER, "foo", READ_EXECUTE), + aclEntry(ACCESS, GROUP, READ_EXECUTE), aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, USER, "foo", READ_EXECUTE), aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, MASK, READ_EXECUTE), aclEntry(DEFAULT, OTHER, NONE)}, + returned); assertPermission((short)010750); assertAclFeature(true); } @@ -162,9 +162,9 @@ public void testModifyAclEntriesOnlyAccess() throws IOException { fs.modifyAclEntries(path, aclSpec); AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(ACCESS, USER, "foo", READ_EXECUTE), - aclEntry(ACCESS, GROUP, READ_EXECUTE) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(ACCESS, USER, "foo", READ_EXECUTE), + aclEntry(ACCESS, GROUP, READ_EXECUTE)}, + returned); assertPermission((short)010750); assertAclFeature(true); } @@ -180,12 +180,10 @@ public void testModifyAclEntriesOnlyDefault() throws IOException { fs.modifyAclEntries(path, aclSpec); AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(DEFAULT, USER, ALL), - aclEntry(DEFAULT, USER, "foo", READ_EXECUTE), - aclEntry(DEFAULT, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, MASK, READ_EXECUTE), - aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, USER, "foo", READ_EXECUTE), aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, MASK, READ_EXECUTE), aclEntry(DEFAULT, OTHER, NONE)}, + returned); assertPermission((short)010750); assertAclFeature(true); } @@ -199,9 +197,9 @@ public void testModifyAclEntriesMinimal() throws IOException { fs.modifyAclEntries(path, aclSpec); AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(ACCESS, USER, "foo", READ_WRITE), - aclEntry(ACCESS, GROUP, READ) }, returned); + assertArrayEquals( + new AclEntry[]{aclEntry(ACCESS, USER, "foo", READ_WRITE), aclEntry(ACCESS, GROUP, READ)}, + returned); 
assertPermission((short)010660); assertAclFeature(true); } @@ -216,10 +214,9 @@ public void testModifyAclEntriesMinimalDefault() throws IOException { fs.modifyAclEntries(path, aclSpec); AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(DEFAULT, USER, ALL), - aclEntry(DEFAULT, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, GROUP, READ_EXECUTE), aclEntry(DEFAULT, OTHER, NONE)}, + returned); assertPermission((short)010750); assertAclFeature(true); } @@ -234,9 +231,9 @@ public void testModifyAclEntriesCustomMask() throws IOException { fs.modifyAclEntries(path, aclSpec); AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(ACCESS, USER, "foo", ALL), - aclEntry(ACCESS, GROUP, READ) }, returned); + assertArrayEquals( + new AclEntry[]{aclEntry(ACCESS, USER, "foo", ALL), aclEntry(ACCESS, GROUP, READ)}, + returned); assertPermission((short)010600); assertAclFeature(true); } @@ -257,36 +254,37 @@ public void testModifyAclEntriesStickyBit() throws IOException { fs.modifyAclEntries(path, aclSpec); AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(ACCESS, USER, "foo", READ_EXECUTE), - aclEntry(ACCESS, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, USER, ALL), - aclEntry(DEFAULT, USER, "foo", READ_EXECUTE), - aclEntry(DEFAULT, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, MASK, READ_EXECUTE), - aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(ACCESS, USER, "foo", READ_EXECUTE), + aclEntry(ACCESS, GROUP, READ_EXECUTE), aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, USER, "foo", READ_EXECUTE), aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, MASK, READ_EXECUTE), aclEntry(DEFAULT, OTHER, NONE)}, + returned); assertPermission((short)011750); assertAclFeature(true); } - @Test(expected=FileNotFoundException.class) + @Test public void testModifyAclEntriesPathNotFound() throws IOException { - // Path has not been created. 
- List aclSpec = Lists.newArrayList( - aclEntry(ACCESS, USER, ALL), - aclEntry(ACCESS, USER, "foo", ALL), - aclEntry(ACCESS, GROUP, READ_EXECUTE), - aclEntry(ACCESS, OTHER, NONE)); - fs.modifyAclEntries(path, aclSpec); + assertThrows(FileNotFoundException.class, () -> { + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE), + aclEntry(ACCESS, OTHER, NONE)); + fs.modifyAclEntries(path, aclSpec); + }); + } - @Test(expected=AclException.class) + @Test public void testModifyAclEntriesDefaultOnFile() throws IOException { - fs.create(path).close(); - fs.setPermission(path, FsPermission.createImmutable((short)0640)); - List aclSpec = Lists.newArrayList( - aclEntry(DEFAULT, USER, "foo", ALL)); - fs.modifyAclEntries(path, aclSpec); + assertThrows(AclException.class, () -> { + fs.create(path).close(); + fs.setPermission(path, FsPermission.createImmutable((short) 0640)); + List aclSpec = Lists.newArrayList( + aclEntry(DEFAULT, USER, "foo", ALL)); + fs.modifyAclEntries(path, aclSpec); + }); } @Test @@ -305,12 +303,10 @@ public void testRemoveAclEntries() throws IOException { fs.removeAclEntries(path, aclSpec); AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(ACCESS, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, USER, ALL), - aclEntry(DEFAULT, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, MASK, READ_EXECUTE), - aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(ACCESS, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, USER, ALL), aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, MASK, READ_EXECUTE), aclEntry(DEFAULT, OTHER, NONE)}, + returned); assertPermission((short)010750); assertAclFeature(true); } @@ -331,9 +327,9 @@ public void testRemoveAclEntriesOnlyAccess() throws IOException { fs.removeAclEntries(path, aclSpec); AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(ACCESS, USER, "bar", READ_WRITE), - aclEntry(ACCESS, GROUP, READ_WRITE) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(ACCESS, USER, "bar", READ_WRITE), + aclEntry(ACCESS, GROUP, READ_WRITE)}, + returned); assertPermission((short)010760); assertAclFeature(true); } @@ -353,12 +349,10 @@ public void testRemoveAclEntriesOnlyDefault() throws IOException { fs.removeAclEntries(path, aclSpec); AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(DEFAULT, USER, ALL), - aclEntry(DEFAULT, USER, "bar", READ_EXECUTE), - aclEntry(DEFAULT, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, MASK, READ_EXECUTE), - aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, USER, "bar", READ_EXECUTE), aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, MASK, READ_EXECUTE), aclEntry(DEFAULT, OTHER, NONE)}, + returned); assertPermission((short)010750); assertAclFeature(true); } @@ -403,10 +397,9 @@ public void testRemoveAclEntriesMinimalDefault() throws IOException { fs.removeAclEntries(path, aclSpec); AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(DEFAULT, USER, ALL), - aclEntry(DEFAULT, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, OTHER, NONE) }, returned); + 
assertArrayEquals(new AclEntry[]{aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, GROUP, READ_EXECUTE), aclEntry(DEFAULT, OTHER, NONE)}, + returned); assertPermission((short)010750); assertAclFeature(true); } @@ -427,22 +420,22 @@ public void testRemoveAclEntriesStickyBit() throws IOException { fs.removeAclEntries(path, aclSpec); AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(ACCESS, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, USER, ALL), - aclEntry(DEFAULT, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, MASK, READ_EXECUTE), - aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(ACCESS, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, USER, ALL), aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, MASK, READ_EXECUTE), aclEntry(DEFAULT, OTHER, NONE)}, + returned); assertPermission((short)011750); assertAclFeature(true); } - @Test(expected=FileNotFoundException.class) + @Test public void testRemoveAclEntriesPathNotFound() throws IOException { - // Path has not been created. - List aclSpec = Lists.newArrayList( - aclEntry(ACCESS, USER, "foo")); - fs.removeAclEntries(path, aclSpec); + assertThrows(FileNotFoundException.class, () -> { + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, "foo")); + fs.removeAclEntries(path, aclSpec); + }); + } @Test @@ -458,9 +451,9 @@ public void testRemoveDefaultAcl() throws Exception { fs.removeDefaultAcl(path); AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(ACCESS, USER, "foo", ALL), - aclEntry(ACCESS, GROUP, READ_EXECUTE) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE)}, + returned); assertPermission((short)010770); assertAclFeature(true); // restart of the cluster @@ -483,9 +476,9 @@ public void testRemoveDefaultAclOnlyAccess() throws Exception { fs.removeDefaultAcl(path); AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(ACCESS, USER, "foo", ALL), - aclEntry(ACCESS, GROUP, READ_EXECUTE) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE)}, + returned); assertPermission((short)010770); assertAclFeature(true); // restart of the cluster @@ -543,9 +536,9 @@ public void testRemoveDefaultAclStickyBit() throws Exception { fs.removeDefaultAcl(path); AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(ACCESS, USER, "foo", ALL), - aclEntry(ACCESS, GROUP, READ_EXECUTE) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE)}, + returned); assertPermission((short)011770); assertAclFeature(true); // restart of the cluster @@ -555,10 +548,12 @@ public void testRemoveDefaultAclStickyBit() throws Exception { assertArrayEquals(returned, afterRestart); } - @Test(expected=FileNotFoundException.class) + @Test public void testRemoveDefaultAclPathNotFound() throws IOException { + assertThrows(FileNotFoundException.class, () -> { + fs.removeDefaultAcl(path); + }); // Path has not been created. 
- fs.removeDefaultAcl(path); } @Test @@ -572,15 +567,15 @@ public void testRemoveAcl() throws IOException { aclEntry(DEFAULT, USER, "foo", ALL)); fs.setAcl(path, aclSpec); - Assert.assertTrue(path + " should have ACLs in FileStatus!", - fs.getFileStatus(path).hasAcl()); - Assert.assertTrue(path + " should have ACLs in FileStatus#toString()!", - fs.getFileStatus(path).toString().contains("hasAcl=true")); + assertTrue(fs.getFileStatus(path).hasAcl(), + path + " should have ACLs in FileStatus!"); + assertTrue(fs.getFileStatus(path).toString().contains("hasAcl=true"), + path + " should have ACLs in FileStatus#toString()!"); fs.removeAcl(path); - Assert.assertFalse(path + " should not have ACLs in FileStatus!", - fs.getFileStatus(path).hasAcl()); - Assert.assertTrue(path + " should not have ACLs in FileStatus#toString()!", - fs.getFileStatus(path).toString().contains("hasAcl=false")); + assertFalse(fs.getFileStatus(path).hasAcl(), + path + " should not have ACLs in FileStatus!"); + assertTrue(fs.getFileStatus(path).toString().contains("hasAcl=false"), + path + " should not have ACLs in FileStatus#toString()!"); AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); @@ -636,10 +631,12 @@ public void testRemoveAclOnlyDefault() throws IOException { assertAclFeature(false); } - @Test(expected=FileNotFoundException.class) + @Test public void testRemoveAclPathNotFound() throws IOException { + assertThrows(FileNotFoundException.class, () -> { + fs.removeAcl(path); + }); // Path has not been created. - fs.removeAcl(path); } @Test @@ -654,14 +651,11 @@ public void testSetAcl() throws IOException { fs.setAcl(path, aclSpec); AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(ACCESS, USER, "foo", ALL), - aclEntry(ACCESS, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, USER, ALL), - aclEntry(DEFAULT, USER, "foo", ALL), - aclEntry(DEFAULT, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, MASK, ALL), - aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE), aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, USER, "foo", ALL), aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, MASK, ALL), aclEntry(DEFAULT, OTHER, NONE)}, + returned); assertPermission((short)010770); assertAclFeature(true); } @@ -678,9 +672,9 @@ public void testSetAclOnlyAccess() throws IOException { fs.setAcl(path, aclSpec); AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(ACCESS, USER, "foo", READ), - aclEntry(ACCESS, GROUP, READ) }, returned); + assertArrayEquals( + new AclEntry[]{aclEntry(ACCESS, USER, "foo", READ), aclEntry(ACCESS, GROUP, READ)}, + returned); assertPermission((short)010640); assertAclFeature(true); } @@ -693,12 +687,10 @@ public void testSetAclOnlyDefault() throws IOException { fs.setAcl(path, aclSpec); AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(DEFAULT, USER, ALL), - aclEntry(DEFAULT, USER, "foo", ALL), - aclEntry(DEFAULT, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, MASK, ALL), - aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, USER, "foo", ALL), aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, MASK, 
ALL), aclEntry(DEFAULT, OTHER, NONE)}, + returned); assertPermission((short)010750); assertAclFeature(true); } @@ -735,10 +727,9 @@ public void testSetAclMinimalDefault() throws IOException { fs.setAcl(path, aclSpec); AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(DEFAULT, USER, ALL), - aclEntry(DEFAULT, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, GROUP, READ_EXECUTE), aclEntry(DEFAULT, OTHER, NONE)}, + returned); assertPermission((short)010750); assertAclFeature(true); } @@ -756,9 +747,9 @@ public void testSetAclCustomMask() throws IOException { fs.setAcl(path, aclSpec); AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(ACCESS, USER, "foo", READ), - aclEntry(ACCESS, GROUP, READ) }, returned); + assertArrayEquals( + new AclEntry[]{aclEntry(ACCESS, USER, "foo", READ), aclEntry(ACCESS, GROUP, READ)}, + returned); assertPermission((short)010670); assertAclFeature(true); } @@ -775,36 +766,37 @@ public void testSetAclStickyBit() throws IOException { fs.setAcl(path, aclSpec); AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(ACCESS, USER, "foo", ALL), - aclEntry(ACCESS, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, USER, ALL), - aclEntry(DEFAULT, USER, "foo", ALL), - aclEntry(DEFAULT, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, MASK, ALL), - aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE), aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, USER, "foo", ALL), aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, MASK, ALL), aclEntry(DEFAULT, OTHER, NONE)}, + returned); assertPermission((short)011770); assertAclFeature(true); } - @Test(expected=FileNotFoundException.class) + @Test public void testSetAclPathNotFound() throws IOException { - // Path has not been created. 
- List aclSpec = Lists.newArrayList( - aclEntry(ACCESS, USER, READ_WRITE), - aclEntry(ACCESS, USER, "foo", READ), - aclEntry(ACCESS, GROUP, READ), - aclEntry(ACCESS, OTHER, NONE)); - fs.setAcl(path, aclSpec); + assertThrows(FileNotFoundException.class, () -> { + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, READ_WRITE), + aclEntry(ACCESS, USER, "foo", READ), + aclEntry(ACCESS, GROUP, READ), + aclEntry(ACCESS, OTHER, NONE)); + fs.setAcl(path, aclSpec); + }); + } - @Test(expected=AclException.class) + @Test public void testSetAclDefaultOnFile() throws IOException { - fs.create(path).close(); - fs.setPermission(path, FsPermission.createImmutable((short)0640)); - List aclSpec = Lists.newArrayList( - aclEntry(DEFAULT, USER, "foo", ALL)); - fs.setAcl(path, aclSpec); + assertThrows(AclException.class, () -> { + fs.create(path).close(); + fs.setPermission(path, FsPermission.createImmutable((short) 0640)); + List aclSpec = Lists.newArrayList( + aclEntry(DEFAULT, USER, "foo", ALL)); + fs.setAcl(path, aclSpec); + }); } @Test @@ -820,14 +812,11 @@ public void testSetPermission() throws IOException { fs.setPermission(path, FsPermission.createImmutable((short)0700)); AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(ACCESS, USER, "foo", ALL), - aclEntry(ACCESS, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, USER, ALL), - aclEntry(DEFAULT, USER, "foo", ALL), - aclEntry(DEFAULT, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, MASK, ALL), - aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE), aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, USER, "foo", ALL), aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, MASK, ALL), aclEntry(DEFAULT, OTHER, NONE)}, + returned); assertPermission((short)010700); assertAclFeature(true); } @@ -845,9 +834,9 @@ public void testSetPermissionOnlyAccess() throws IOException { fs.setPermission(path, FsPermission.createImmutable((short)0600)); AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(ACCESS, USER, "foo", READ), - aclEntry(ACCESS, GROUP, READ) }, returned); + assertArrayEquals( + new AclEntry[]{aclEntry(ACCESS, USER, "foo", READ), aclEntry(ACCESS, GROUP, READ)}, + returned); assertPermission((short)010600); assertAclFeature(true); } @@ -864,12 +853,10 @@ public void testSetPermissionOnlyDefault() throws IOException { fs.setPermission(path, FsPermission.createImmutable((short)0700)); AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(DEFAULT, USER, ALL), - aclEntry(DEFAULT, USER, "foo", ALL), - aclEntry(DEFAULT, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, MASK, ALL), - aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, USER, "foo", ALL), aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, MASK, ALL), aclEntry(DEFAULT, OTHER, NONE)}, + returned); assertPermission((short)010700); assertAclFeature(true); } @@ -907,9 +894,9 @@ public void testDefaultAclNewFile() throws Exception { fs.create(filePath).close(); AclStatus s = fs.getAclStatus(filePath); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(ACCESS, USER, "foo", ALL), - 
aclEntry(ACCESS, GROUP, READ_EXECUTE) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE)}, + returned); assertPermission(filePath, (short)010660); assertAclFeature(filePath, true); } @@ -934,9 +921,9 @@ public void testUMaskDefaultAclNewFile() throws Exception { fs.create(filePath).close(); AclStatus s = fs.getAclStatus(filePath); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[]{ - aclEntry(ACCESS, USER, "foo", ALL), - aclEntry(ACCESS, GROUP, READ_WRITE)}, returned); + assertArrayEquals(new AclEntry[]{aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, GROUP, READ_WRITE)}, + returned); assertPermission(filePath, (short) 010640); fsDirectory.setPosixAclInheritanceEnabled(true); @@ -944,9 +931,9 @@ public void testUMaskDefaultAclNewFile() throws Exception { fs.create(file2Path).close(); AclStatus s2 = fs.getAclStatus(file2Path); AclEntry[] returned2 = s2.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[]{ - aclEntry(ACCESS, USER, "foo", ALL), - aclEntry(ACCESS, GROUP, READ_WRITE)}, returned2); + assertArrayEquals(new AclEntry[]{aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, GROUP, READ_WRITE)}, + returned2); assertPermission(file2Path, (short) 010660); } finally { fsDirectory.setPosixAclInheritanceEnabled(oldEnabled); @@ -992,24 +979,21 @@ public void testDefaultAclNewDir() throws Exception { List aclSpec = Lists.newArrayList( aclEntry(DEFAULT, USER, "foo", ALL)); fs.setAcl(path, aclSpec); - Assert.assertTrue(path + " should have ACLs in FileStatus!", - fs.getFileStatus(path).hasAcl()); + assertTrue(fs.getFileStatus(path).hasAcl(), + path + " should have ACLs in FileStatus!"); Path dirPath = new Path(path, "dir1"); fs.mkdirs(dirPath); - Assert.assertTrue(dirPath + " should have ACLs in FileStatus!", - fs.getFileStatus(dirPath).hasAcl()); + assertTrue(fs.getFileStatus(dirPath).hasAcl(), + dirPath + " should have ACLs in FileStatus!"); AclStatus s = fs.getAclStatus(dirPath); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(ACCESS, USER, "foo", ALL), - aclEntry(ACCESS, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, USER, ALL), - aclEntry(DEFAULT, USER, "foo", ALL), - aclEntry(DEFAULT, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, MASK, ALL), - aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE), aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, USER, "foo", ALL), aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, MASK, ALL), aclEntry(DEFAULT, OTHER, NONE)}, + returned); assertPermission(dirPath, (short)010770); assertAclFeature(dirPath, true); } @@ -1034,14 +1018,11 @@ public void testUMaskDefaultAclNewDir() throws Exception { fs.mkdirs(dirPath); AclStatus s = fs.getAclStatus(dirPath); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[]{ - aclEntry(ACCESS, USER, "foo", ALL), - aclEntry(ACCESS, GROUP, ALL), - aclEntry(DEFAULT, USER, ALL), - aclEntry(DEFAULT, USER, "foo", ALL), - aclEntry(DEFAULT, GROUP, ALL), - aclEntry(DEFAULT, MASK, ALL), - aclEntry(DEFAULT, OTHER, NONE)}, returned); + assertArrayEquals(new AclEntry[]{aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, GROUP, ALL), aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, USER, "foo", ALL), aclEntry(DEFAULT, GROUP, ALL), + aclEntry(DEFAULT, MASK, ALL), aclEntry(DEFAULT, 
OTHER, NONE)}, + returned); assertPermission(dirPath, (short) 010750); fsDirectory.setPosixAclInheritanceEnabled(true); @@ -1049,14 +1030,11 @@ public void testUMaskDefaultAclNewDir() throws Exception { fs.mkdirs(dir2Path); AclStatus s2 = fs.getAclStatus(dir2Path); AclEntry[] returned2 = s2.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[]{ - aclEntry(ACCESS, USER, "foo", ALL), - aclEntry(ACCESS, GROUP, ALL), - aclEntry(DEFAULT, USER, ALL), - aclEntry(DEFAULT, USER, "foo", ALL), - aclEntry(DEFAULT, GROUP, ALL), - aclEntry(DEFAULT, MASK, ALL), - aclEntry(DEFAULT, OTHER, NONE)}, returned2); + assertArrayEquals(new AclEntry[]{aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, GROUP, ALL), aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, USER, "foo", ALL), aclEntry(DEFAULT, GROUP, ALL), + aclEntry(DEFAULT, MASK, ALL), aclEntry(DEFAULT, OTHER, NONE)}, + returned2); assertPermission(dir2Path, (short) 010770); } finally { fsDirectory.setPosixAclInheritanceEnabled(oldEnabled); @@ -1091,10 +1069,9 @@ public void testDefaultMinimalAclNewDir() throws Exception { fs.mkdirs(dirPath); AclStatus s = fs.getAclStatus(dirPath); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(DEFAULT, USER, ALL), - aclEntry(DEFAULT, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, GROUP, READ_EXECUTE), aclEntry(DEFAULT, OTHER, NONE)}, + returned); assertPermission(dirPath, (short)010750); assertAclFeature(dirPath, true); } @@ -1213,9 +1190,9 @@ public void testDefaultAclNewFileWithMode() throws Exception { .close(); AclStatus s = fs.getAclStatus(filePath); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(ACCESS, USER, "foo", ALL), - aclEntry(ACCESS, GROUP, READ_EXECUTE) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE)}, + returned); assertPermission(filePath, (short)010740); assertAclFeature(filePath, true); } @@ -1230,14 +1207,11 @@ public void testDefaultAclNewDirWithMode() throws Exception { fs.mkdirs(dirPath, new FsPermission((short)0740)); AclStatus s = fs.getAclStatus(dirPath); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(ACCESS, USER, "foo", ALL), - aclEntry(ACCESS, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, USER, ALL), - aclEntry(DEFAULT, USER, "foo", ALL), - aclEntry(DEFAULT, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, MASK, ALL), - aclEntry(DEFAULT, OTHER, READ_EXECUTE) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE), aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, USER, "foo", ALL), aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, MASK, ALL), aclEntry(DEFAULT, OTHER, READ_EXECUTE)}, + returned); assertPermission(dirPath, (short)010740); assertAclFeature(dirPath, true); } @@ -1329,8 +1303,9 @@ public void testModifyAclEntriesMustBeOwnerOrSuper() throws Exception { fsAsBruce.modifyAclEntries(bruceFile, aclSpec); fs.modifyAclEntries(bruceFile, aclSpec); fsAsSupergroupMember.modifyAclEntries(bruceFile, aclSpec); - exception.expect(AccessControlException.class); - fsAsDiana.modifyAclEntries(bruceFile, aclSpec); + assertThrows(AccessControlException.class, () -> { + fsAsDiana.modifyAclEntries(bruceFile, aclSpec); + }); } @Test @@ -1345,8 
+1320,9 @@ public void testRemoveAclEntriesMustBeOwnerOrSuper() throws Exception { fsAsBruce.removeAclEntries(bruceFile, aclSpec); fs.removeAclEntries(bruceFile, aclSpec); fsAsSupergroupMember.removeAclEntries(bruceFile, aclSpec); - exception.expect(AccessControlException.class); - fsAsDiana.removeAclEntries(bruceFile, aclSpec); + assertThrows(AccessControlException.class, () -> { + fsAsDiana.removeAclEntries(bruceFile, aclSpec); + }); } @Test @@ -1359,8 +1335,9 @@ public void testRemoveDefaultAclMustBeOwnerOrSuper() throws Exception { fsAsBruce.removeDefaultAcl(bruceFile); fs.removeDefaultAcl(bruceFile); fsAsSupergroupMember.removeDefaultAcl(bruceFile); - exception.expect(AccessControlException.class); - fsAsDiana.removeDefaultAcl(bruceFile); + assertThrows(AccessControlException.class, () -> { + fsAsDiana.removeDefaultAcl(bruceFile); + }); } @Test @@ -1373,8 +1350,9 @@ public void testRemoveAclMustBeOwnerOrSuper() throws Exception { fsAsBruce.removeAcl(bruceFile); fs.removeAcl(bruceFile); fsAsSupergroupMember.removeAcl(bruceFile); - exception.expect(AccessControlException.class); - fsAsDiana.removeAcl(bruceFile); + assertThrows(AccessControlException.class, () -> { + fsAsDiana.removeAcl(bruceFile); + }); } @Test @@ -1392,8 +1370,9 @@ public void testSetAclMustBeOwnerOrSuper() throws Exception { fsAsBruce.setAcl(bruceFile, aclSpec); fs.setAcl(bruceFile, aclSpec); fsAsSupergroupMember.setAcl(bruceFile, aclSpec); - exception.expect(AccessControlException.class); - fsAsDiana.setAcl(bruceFile, aclSpec); + assertThrows(AccessControlException.class, () -> { + fsAsDiana.setAcl(bruceFile, aclSpec); + }); } @Test @@ -1411,8 +1390,9 @@ public void testGetAclStatusRequiresTraverseOrSuper() throws Exception { fsAsBruce.getAclStatus(bruceFile); fs.getAclStatus(bruceFile); fsAsSupergroupMember.getAclStatus(bruceFile); - exception.expect(AccessControlException.class); - fsAsDiana.getAclStatus(bruceFile); + assertThrows(AccessControlException.class, () -> { + fsAsDiana.getAclStatus(bruceFile); + }); } @Test @@ -1478,17 +1458,16 @@ public void testEffectiveAccess() throws Exception { // give all access at first fs.setPermission(p1, FsPermission.valueOf("-rwxrwxrwx")); AclStatus aclStatus = fs.getAclStatus(p1); - assertEquals("Entries should be empty", 0, aclStatus.getEntries().size()); - assertEquals("Permission should be carried by AclStatus", - fs.getFileStatus(p1).getPermission(), aclStatus.getPermission()); + assertEquals(0, aclStatus.getEntries().size(), "Entries should be empty"); + assertEquals(fs.getFileStatus(p1).getPermission(), aclStatus.getPermission(), + "Permission should be carried by AclStatus"); // Add a named entries with all access fs.modifyAclEntries(p1, Lists.newArrayList( aclEntry(ACCESS, USER, "bruce", ALL), aclEntry(ACCESS, GROUP, "groupY", ALL))); aclStatus = fs.getAclStatus(p1); - assertEquals("Entries should contain owner group entry also", 3, aclStatus - .getEntries().size()); + assertEquals(3, aclStatus.getEntries().size(), "Entries should contain owner group entry also"); // restrict the access fs.setPermission(p1, FsPermission.valueOf("-rwxr-----")); @@ -1540,8 +1519,8 @@ public void testDeDuplication() throws Exception { aclEntry(DEFAULT, GROUP, "testdeduplicategroup", ALL)); fs.mkdirs(p1); fs.modifyAclEntries(p1, aclSpec); - assertEquals("One more ACL feature should be unique", currentSize + 1, - AclStorage.getUniqueAclFeatures().getUniqueElementsSize()); + assertEquals(currentSize + 1, AclStorage.getUniqueAclFeatures().getUniqueElementsSize(), + "One more ACL feature 
should be unique"); currentSize++; } Path child1 = new Path(p1, "child1"); @@ -1549,11 +1528,10 @@ public void testDeDuplication() throws Exception { { // new child dir should copy entries from its parent. fs.mkdirs(child1); - assertEquals("One more ACL feature should be unique", currentSize + 1, - AclStorage.getUniqueAclFeatures().getUniqueElementsSize()); + assertEquals(currentSize + 1, AclStorage.getUniqueAclFeatures().getUniqueElementsSize(), + "One more ACL feature should be unique"); child1AclFeature = getAclFeature(child1, cluster); - assertEquals("Reference count should be 1", 1, - child1AclFeature.getRefCount()); + assertEquals(1, child1AclFeature.getRefCount(), "Reference count should be 1"); currentSize++; } Path child2 = new Path(p1, "child2"); @@ -1561,13 +1539,11 @@ public void testDeDuplication() throws Exception { // new child dir should copy entries from its parent. But all entries are // same as its sibling without any more acl changes. fs.mkdirs(child2); - assertEquals("existing AclFeature should be re-used", currentSize, - AclStorage.getUniqueAclFeatures().getUniqueElementsSize()); + assertEquals(currentSize, AclStorage.getUniqueAclFeatures().getUniqueElementsSize(), + "existing AclFeature should be re-used"); AclFeature child2AclFeature = getAclFeature(child1, cluster); - assertSame("Same Aclfeature should be re-used", child1AclFeature, - child2AclFeature); - assertEquals("Reference count should be 2", 2, - child2AclFeature.getRefCount()); + assertSame(child1AclFeature, child2AclFeature, "Same Aclfeature should be re-used"); + assertEquals(2, child2AclFeature.getRefCount(), "Reference count should be 2"); } { // modification of ACL on should decrement the original reference count @@ -1576,31 +1552,25 @@ public void testDeDuplication() throws Exception { "user1", ALL)); fs.modifyAclEntries(child1, aclSpec); AclFeature modifiedAclFeature = getAclFeature(child1, cluster); - assertEquals("Old Reference count should be 1", 1, - child1AclFeature.getRefCount()); - assertEquals("New Reference count should be 1", 1, - modifiedAclFeature.getRefCount()); + assertEquals(1, child1AclFeature.getRefCount(), "Old Reference count should be 1"); + assertEquals(1, modifiedAclFeature.getRefCount(), "New Reference count should be 1"); // removing the new added ACL entry should refer to old ACLfeature AclEntry aclEntry = new AclEntry.Builder().setScope(ACCESS).setType(USER) .setName("user1").build(); fs.removeAclEntries(child1, Lists.newArrayList(aclEntry)); - assertEquals("Old Reference count should be 2 again", 2, - child1AclFeature.getRefCount()); - assertEquals("New Reference count should be 0", 0, - modifiedAclFeature.getRefCount()); + assertEquals(2, child1AclFeature.getRefCount(), "Old Reference count should be 2 again"); + assertEquals(0, modifiedAclFeature.getRefCount(), "New Reference count should be 0"); } { // verify the reference count on deletion of Acls fs.removeAcl(child2); - assertEquals("Reference count should be 1", 1, - child1AclFeature.getRefCount()); + assertEquals(1, child1AclFeature.getRefCount(), "Reference count should be 1"); } { // verify the reference count on deletion of dir with ACL fs.delete(child1, true); - assertEquals("Reference count should be 0", 0, - child1AclFeature.getRefCount()); + assertEquals(0, child1AclFeature.getRefCount(), "Reference count should be 0"); } Path file1 = new Path(p1, "file1"); @@ -1610,11 +1580,9 @@ public void testDeDuplication() throws Exception { // Using same reference on creation of file fs.create(file1).close(); 
fileAclFeature = getAclFeature(file1, cluster); - assertEquals("Reference count should be 1", 1, - fileAclFeature.getRefCount()); + assertEquals(1, fileAclFeature.getRefCount(), "Reference count should be 1"); fs.create(file2).close(); - assertEquals("Reference count should be 2", 2, - fileAclFeature.getRefCount()); + assertEquals(2, fileAclFeature.getRefCount(), "Reference count should be 2"); } { // modifying ACLs on file should decrease the reference count on old @@ -1624,34 +1592,30 @@ public void testDeDuplication() throws Exception { // adding new ACL entry fs.modifyAclEntries(file1, aclSpec); AclFeature modifiedFileAcl = getAclFeature(file1, cluster); - assertEquals("Old Reference count should be 1", 1, - fileAclFeature.getRefCount()); - assertEquals("New Reference count should be 1", 1, - modifiedFileAcl.getRefCount()); + assertEquals(1, fileAclFeature.getRefCount(), "Old Reference count should be 1"); + assertEquals(1, modifiedFileAcl.getRefCount(), "New Reference count should be 1"); // removing the new added ACL entry should refer to old ACLfeature AclEntry aclEntry = new AclEntry.Builder().setScope(ACCESS).setType(USER) .setName("user1").build(); fs.removeAclEntries(file1, Lists.newArrayList(aclEntry)); - assertEquals("Old Reference count should be 2", 2, - fileAclFeature.getRefCount()); - assertEquals("New Reference count should be 0", 0, - modifiedFileAcl.getRefCount()); + assertEquals(2, fileAclFeature.getRefCount(), "Old Reference count should be 2"); + assertEquals(0, modifiedFileAcl.getRefCount(), "New Reference count should be 0"); } { // reference count should be decreased on deletion of files with ACLs fs.delete(file2, true); - assertEquals("Reference count should be decreased on delete of the file", - 1, fileAclFeature.getRefCount()); + assertEquals(1, fileAclFeature.getRefCount(), + "Reference count should be decreased on delete of the file"); fs.delete(file1, true); - assertEquals("Reference count should be decreased on delete of the file", - 0, fileAclFeature.getRefCount()); + assertEquals(0, fileAclFeature.getRefCount(), + "Reference count should be decreased on delete of the file"); // On reference count reaches 0 instance should be removed from map fs.create(file1).close(); AclFeature newFileAclFeature = getAclFeature(file1, cluster); - assertNotSame("Instance should be different on reference count 0", - fileAclFeature, newFileAclFeature); + assertNotSame(fileAclFeature, newFileAclFeature, + "Instance should be different on reference count 0"); fileAclFeature = newFileAclFeature; } Map restartRefCounter = new HashMap<>(); @@ -1670,12 +1634,12 @@ public void testDeDuplication() throws Exception { cluster.restartNameNode(true); List entriesAfterRestart = AclStorage.getUniqueAclFeatures() .getEntries(); - assertEquals("Entries before and after should be same", - entriesBeforeRestart, entriesAfterRestart); + assertEquals(entriesBeforeRestart, entriesAfterRestart, + "Entries before and after should be same"); for (AclFeature aclFeature : entriesAfterRestart) { int before = restartRefCounter.get(aclFeature); - assertEquals("ReferenceCount After Restart should be doubled", - before * 2, aclFeature.getRefCount()); + assertEquals(before * 2, aclFeature.getRefCount(), + "ReferenceCount After Restart should be doubled"); } } { @@ -1688,12 +1652,12 @@ public void testDeDuplication() throws Exception { cluster.restartNameNode(true); List entriesAfterRestart = AclStorage.getUniqueAclFeatures() .getEntries(); - assertEquals("Entries before and after should be same", - 
entriesBeforeRestart, entriesAfterRestart); + assertEquals(entriesBeforeRestart, entriesAfterRestart, + "Entries before and after should be same"); for (AclFeature aclFeature : entriesAfterRestart) { int before = restartRefCounter.get(aclFeature); - assertEquals("ReferenceCount After 2 Restarts should be tripled", - before * 3, aclFeature.getRefCount()); + assertEquals(before * 3, aclFeature.getRefCount(), + "ReferenceCount After 2 Restarts should be tripled"); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java index 14283f76fd429..3cf439b18426c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; @@ -445,7 +445,7 @@ public static void assertFileContentsDifferent( public static Map getFileMD5s(File... files) throws Exception { Map ret = Maps.newHashMap(); for (File f : files) { - assertTrue("Must exist: " + f, f.exists()); + assertTrue(f.exists(), "Must exist: " + f); ret.put(f, getFileMD5(f)); } return ret; @@ -514,7 +514,7 @@ public static void assertNNHasCheckpoints(MiniDFSCluster cluster, for (long checkpointTxId : txids) { File image = new File(nameDir, NNStorage.getImageFileName(checkpointTxId)); - assertTrue("Expected non-empty " + image, image.length() > 0); + assertTrue(image.length() > 0, "Expected non-empty " + image); } } } @@ -531,7 +531,7 @@ public static void assertNNHasRollbackCheckpoints(MiniDFSCluster cluster, for (long checkpointTxId : txids) { File image = new File(nameDir, NNStorage.getRollbackImageFileName(checkpointTxId)); - assertTrue("Expected non-empty " + image, image.length() > 0); + assertTrue(image.length() > 0, "Expected non-empty " + image); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java index f68838906819f..ed2396949ca22 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java @@ -50,16 +50,19 @@ import static org.apache.hadoop.fs.permission.FsAction.READ; import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry; import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.SECURITY_XATTR_UNREADABLE_BY_SUPERUSER; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import 
org.junit.Test; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; @@ -99,7 +102,7 @@ public class FSXAttrBaseTest { private static final UserGroupInformation DIANA = UserGroupInformation.createUserForTesting("diana", new String[] { }); - @BeforeClass + @BeforeAll public static void init() throws Exception { conf = new HdfsConfiguration(); conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true); @@ -109,14 +112,14 @@ public static void init() throws Exception { initCluster(true); } - @AfterClass + @AfterAll public static void shutdown() { if (dfsCluster != null) { dfsCluster.shutdown(); } } - @Before + @BeforeEach public void setUp() throws Exception { pathCount += 1; path = new Path("/p" + pathCount); @@ -126,7 +129,7 @@ public void setUp() throws Exception { initFileSystem(); } - @After + @AfterEach public void destroyFileSystems() { IOUtils.cleanupWithLogger(null, fs); fs = null; @@ -139,7 +142,8 @@ public void destroyFileSystems() { * 3. Create multiple xattrs. * 4. Restart NN and save checkpoint scenarios. */ - @Test(timeout = 120000) + @Test + @Timeout(value = 120) public void testCreateXAttr() throws Exception { Map expectedXAttrs = Maps.newHashMap(); expectedXAttrs.put(name1, value1); @@ -156,19 +160,19 @@ private void doTestCreateXAttr(Path usePath, Map xattrs = fs.getXAttrs(usePath); - Assert.assertEquals(xattrs.size(), 1); - Assert.assertArrayEquals(value1, xattrs.get(name1)); + assertEquals(xattrs.size(), 1); + assertArrayEquals(value1, xattrs.get(name1)); fs.removeXAttr(usePath, name1); xattrs = fs.getXAttrs(usePath); - Assert.assertEquals(xattrs.size(), 0); + assertEquals(xattrs.size(), 0); // Create xattr which already exists. fs.setXAttr(usePath, name1, value1, EnumSet.of(XAttrSetFlag.CREATE)); try { fs.setXAttr(usePath, name1, value1, EnumSet.of(XAttrSetFlag.CREATE)); - Assert.fail("Creating xattr which already exists should fail."); + fail("Creating xattr which already exists should fail."); } catch (IOException e) { } fs.removeXAttr(usePath, name1); @@ -179,31 +183,31 @@ private void doTestCreateXAttr(Path usePath, Map ent : expectedXAttrs.entrySet()) { final byte[] val = (ent.getValue() == null) ? new byte[0] : ent.getValue(); - Assert.assertArrayEquals(val, xattrs.get(ent.getKey())); + assertArrayEquals(val, xattrs.get(ent.getKey())); } restart(false); initFileSystem(); xattrs = fs.getXAttrs(usePath); - Assert.assertEquals(xattrs.size(), expectedXAttrs.size()); + assertEquals(xattrs.size(), expectedXAttrs.size()); for (Map.Entry ent : expectedXAttrs.entrySet()) { final byte[] val = (ent.getValue() == null) ? 
new byte[0] : ent.getValue(); - Assert.assertArrayEquals(val, xattrs.get(ent.getKey())); + assertArrayEquals(val, xattrs.get(ent.getKey())); } restart(true); initFileSystem(); xattrs = fs.getXAttrs(usePath); - Assert.assertEquals(xattrs.size(), expectedXAttrs.size()); + assertEquals(xattrs.size(), expectedXAttrs.size()); for (Map.Entry ent : expectedXAttrs.entrySet()) { final byte[] val = (ent.getValue() == null) ? new byte[0] : ent.getValue(); - Assert.assertArrayEquals(val, xattrs.get(ent.getKey())); + assertArrayEquals(val, xattrs.get(ent.getKey())); } fs.delete(usePath, false); @@ -216,22 +220,23 @@ private void doTestCreateXAttr(Path usePath, Map xattrs = fs.getXAttrs(path); - Assert.assertEquals(xattrs.size(), 1); - Assert.assertArrayEquals(newValue1, xattrs.get(name1)); + assertEquals(xattrs.size(), 1); + assertArrayEquals(newValue1, xattrs.get(name1)); fs.removeXAttr(path, name1); // Replace xattr which does not exist. try { fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.REPLACE)); - Assert.fail("Replacing xattr which does not exist should fail."); + fail("Replacing xattr which does not exist should fail."); } catch (IOException e) { } @@ -240,23 +245,23 @@ public void testReplaceXAttr() throws Exception { fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE)); fs.setXAttr(path, name2, null, EnumSet.of(XAttrSetFlag.REPLACE)); xattrs = fs.getXAttrs(path); - Assert.assertEquals(xattrs.size(), 2); - Assert.assertArrayEquals(value1, xattrs.get(name1)); - Assert.assertArrayEquals(new byte[0], xattrs.get(name2)); + assertEquals(xattrs.size(), 2); + assertArrayEquals(value1, xattrs.get(name1)); + assertArrayEquals(new byte[0], xattrs.get(name2)); restart(false); initFileSystem(); xattrs = fs.getXAttrs(path); - Assert.assertEquals(xattrs.size(), 2); - Assert.assertArrayEquals(value1, xattrs.get(name1)); - Assert.assertArrayEquals(new byte[0], xattrs.get(name2)); + assertEquals(xattrs.size(), 2); + assertArrayEquals(value1, xattrs.get(name1)); + assertArrayEquals(new byte[0], xattrs.get(name2)); restart(true); initFileSystem(); xattrs = fs.getXAttrs(path); - Assert.assertEquals(xattrs.size(), 2); - Assert.assertArrayEquals(value1, xattrs.get(name1)); - Assert.assertArrayEquals(new byte[0], xattrs.get(name2)); + assertEquals(xattrs.size(), 2); + assertArrayEquals(value1, xattrs.get(name1)); + assertArrayEquals(new byte[0], xattrs.get(name2)); fs.removeXAttr(path, name1); fs.removeXAttr(path, name2); @@ -271,22 +276,23 @@ public void testReplaceXAttr() throws Exception { * 5. Set xattr and name is too long. * 6. Set xattr and value is too long. 
*/ - @Test(timeout = 120000) + @Test + @Timeout(value = 120) public void testSetXAttr() throws Exception { FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750)); fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE)); Map xattrs = fs.getXAttrs(path); - Assert.assertEquals(xattrs.size(), 1); - Assert.assertArrayEquals(value1, xattrs.get(name1)); + assertEquals(xattrs.size(), 1); + assertArrayEquals(value1, xattrs.get(name1)); fs.removeXAttr(path, name1); // Set xattr with null name try { fs.setXAttr(path, null, value1, EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE)); - Assert.fail("Setting xattr with null name should fail."); + fail("Setting xattr with null name should fail."); } catch (NullPointerException e) { GenericTestUtils.assertExceptionContains("XAttr name cannot be null", e); } catch (RemoteException e) { @@ -298,10 +304,10 @@ public void testSetXAttr() throws Exception { try { fs.setXAttr(path, "user.", value1, EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE)); - Assert.fail("Setting xattr with empty name should fail."); + fail("Setting xattr with empty name should fail."); } catch (RemoteException e) { - assertEquals("Unexpected RemoteException: " + e, e.getClassName(), - HadoopIllegalArgumentException.class.getCanonicalName()); + assertEquals(e.getClassName(), HadoopIllegalArgumentException.class.getCanonicalName(), + "Unexpected RemoteException: " + e); GenericTestUtils.assertExceptionContains("XAttr name cannot be empty", e); } catch (HadoopIllegalArgumentException e) { GenericTestUtils.assertExceptionContains("XAttr name cannot be empty", e); @@ -311,11 +317,11 @@ public void testSetXAttr() throws Exception { try { fs.setXAttr(path, "a1", value1, EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE)); - Assert.fail("Setting xattr with invalid name prefix or without " + + fail("Setting xattr with invalid name prefix or without " + "name prefix should fail."); } catch (RemoteException e) { - assertEquals("Unexpected RemoteException: " + e, e.getClassName(), - HadoopIllegalArgumentException.class.getCanonicalName()); + assertEquals(e.getClassName(), HadoopIllegalArgumentException.class.getCanonicalName(), + "Unexpected RemoteException: " + e); GenericTestUtils.assertExceptionContains("XAttr name must be prefixed", e); } catch (HadoopIllegalArgumentException e) { GenericTestUtils.assertExceptionContains("XAttr name must be prefixed", e); @@ -324,8 +330,8 @@ public void testSetXAttr() throws Exception { // Set xattr without XAttrSetFlag fs.setXAttr(path, name1, value1); xattrs = fs.getXAttrs(path); - Assert.assertEquals(xattrs.size(), 1); - Assert.assertArrayEquals(value1, xattrs.get(name1)); + assertEquals(xattrs.size(), 1); + assertArrayEquals(value1, xattrs.get(name1)); fs.removeXAttr(path, name1); // XAttr exists, and replace it using CREATE|REPLACE flag. 
@@ -334,8 +340,8 @@ public void testSetXAttr() throws Exception { XAttrSetFlag.REPLACE)); xattrs = fs.getXAttrs(path); - Assert.assertEquals(xattrs.size(), 1); - Assert.assertArrayEquals(newValue1, xattrs.get(name1)); + assertEquals(xattrs.size(), 1); + assertArrayEquals(newValue1, xattrs.get(name1)); fs.removeXAttr(path, name1); @@ -345,7 +351,7 @@ public void testSetXAttr() throws Exception { fs.setXAttr(path, name3, null); try { fs.setXAttr(path, name4, null); - Assert.fail("Setting xattr should fail if total number of xattrs " + + fail("Setting xattr should fail if total number of xattrs " + "for inode exceeds max limit."); } catch (IOException e) { GenericTestUtils.assertExceptionContains("Cannot add additional XAttr", e); @@ -358,7 +364,7 @@ public void testSetXAttr() throws Exception { String longName = "user.0123456789abcdefX0123456789abcdefX0123456789abcdef"; try { fs.setXAttr(path, longName, null); - Assert.fail("Setting xattr should fail if name is too long."); + fail("Setting xattr should fail if name is too long."); } catch (IOException e) { GenericTestUtils.assertExceptionContains("XAttr is too big", e); GenericTestUtils.assertExceptionContains("total size is 50", e); @@ -368,7 +374,7 @@ public void testSetXAttr() throws Exception { byte[] longValue = new byte[MAX_SIZE]; try { fs.setXAttr(path, "user.a", longValue); - Assert.fail("Setting xattr should fail if value is too long."); + fail("Setting xattr should fail if value is too long."); } catch (IOException e) { GenericTestUtils.assertExceptionContains("XAttr is too big", e); GenericTestUtils.assertExceptionContains("total size is 38", e); @@ -394,19 +400,21 @@ public void testSetXAttr() throws Exception { * the caller does not have search access to the owning directory and read * access to the actual entity */ - @Test(timeout = 120000) + @Test + @Timeout(value = 120) + @SuppressWarnings("checkstyle:methodlength") public void testGetXAttrs() throws Exception { FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750)); fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE)); fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE)); final byte[] theValue = fs.getXAttr(path, "USER.a2"); - Assert.assertArrayEquals(value2, theValue); + assertArrayEquals(value2, theValue); /* An XAttr that was requested does not exist. */ try { final byte[] value = fs.getXAttr(path, name3); - Assert.fail("expected IOException"); + fail("expected IOException"); } catch (IOException e) { GenericTestUtils.assertExceptionContains( XAttrNotFoundException.DEFAULT_EXCEPTION_MSG, e); @@ -420,7 +428,7 @@ public void testGetXAttrs() throws Exception { names.add(name3); try { final Map xattrs = fs.getXAttrs(path, names); - Assert.fail("expected IOException"); + fail("expected IOException"); } catch (IOException e) { GenericTestUtils.assertExceptionContains( XAttrNotFoundException.DEFAULT_EXCEPTION_MSG, e); @@ -433,7 +441,7 @@ public void testGetXAttrs() throws Exception { /* Unknown namespace should throw an exception. 
*/ try { final byte[] xattr = fs.getXAttr(path, "wackynamespace.foo"); - Assert.fail("expected IOException"); + fail("expected IOException"); } catch (Exception e) { GenericTestUtils.assertExceptionContains ("An XAttr name must be prefixed with " + @@ -458,7 +466,7 @@ public Object run() throws Exception { return null; } }); - Assert.fail("expected IOException"); + fail("expected IOException"); } catch (IOException e) { GenericTestUtils.assertExceptionContains("User doesn't have permission", e); } @@ -481,7 +489,7 @@ public Object run() throws Exception { return null; } }); - Assert.fail("expected IOException"); + fail("expected IOException"); } catch (IOException e) { GenericTestUtils.assertExceptionContains("Permission denied", e); } @@ -502,7 +510,7 @@ public Object run() throws Exception { return null; } }); - Assert.fail("expected IOException"); + fail("expected IOException"); } catch (IOException e) { GenericTestUtils.assertExceptionContains("Permission denied", e); } @@ -518,7 +526,7 @@ public Object run() throws Exception { return null; } }); - Assert.fail("expected IOException"); + fail("expected IOException"); } catch (IOException e) { GenericTestUtils.assertExceptionContains("Permission denied", e); } @@ -538,7 +546,7 @@ public Object run() throws Exception { return null; } }); - Assert.fail("expected IOException"); + fail("expected IOException"); } catch (IOException e) { GenericTestUtils.assertExceptionContains("Permission denied", e); } @@ -564,7 +572,8 @@ public Object run() throws Exception { * 1. Remove xattr. * 2. Restart NN and save checkpoint scenarios. */ - @Test(timeout = 120000) + @Test + @Timeout(value = 120) public void testRemoveXAttr() throws Exception { FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750)); fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE)); @@ -575,20 +584,20 @@ public void testRemoveXAttr() throws Exception { fs.removeXAttr(path, name2); Map xattrs = fs.getXAttrs(path); - Assert.assertEquals(xattrs.size(), 1); - Assert.assertArrayEquals(new byte[0], xattrs.get(name3)); + assertEquals(xattrs.size(), 1); + assertArrayEquals(new byte[0], xattrs.get(name3)); restart(false); initFileSystem(); xattrs = fs.getXAttrs(path); - Assert.assertEquals(xattrs.size(), 1); - Assert.assertArrayEquals(new byte[0], xattrs.get(name3)); + assertEquals(xattrs.size(), 1); + assertArrayEquals(new byte[0], xattrs.get(name3)); restart(true); initFileSystem(); xattrs = fs.getXAttrs(path); - Assert.assertEquals(xattrs.size(), 1); - Assert.assertArrayEquals(new byte[0], xattrs.get(name3)); + assertEquals(xattrs.size(), 1); + assertArrayEquals(new byte[0], xattrs.get(name3)); fs.removeXAttr(path, name3); } @@ -607,7 +616,8 @@ public void testRemoveXAttr() throws Exception { * the caller does not have execute access to the owning directory and write * access to the actual entity */ - @Test(timeout = 120000) + @Test + @Timeout(value = 120) public void testRemoveXAttrPermissions() throws Exception { FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750)); fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE)); @@ -617,7 +627,7 @@ public void testRemoveXAttrPermissions() throws Exception { try { fs.removeXAttr(path, name2); fs.removeXAttr(path, name2); - Assert.fail("expected IOException"); + fail("expected IOException"); } catch (IOException e) { GenericTestUtils.assertExceptionContains("No matching attributes found", e); } @@ -627,10 +637,10 @@ public void testRemoveXAttrPermissions() throws Exception { "with 
user/trusted/security/system/raw, followed by a '.'"; try { fs.removeXAttr(path, "wackynamespace.foo"); - Assert.fail("expected IOException"); + fail("expected IOException"); } catch (RemoteException e) { - assertEquals("Unexpected RemoteException: " + e, e.getClassName(), - HadoopIllegalArgumentException.class.getCanonicalName()); + assertEquals(e.getClassName(), HadoopIllegalArgumentException.class.getCanonicalName(), + "Unexpected RemoteException: " + e); GenericTestUtils.assertExceptionContains(expectedExceptionString, e); } catch (HadoopIllegalArgumentException e) { GenericTestUtils.assertExceptionContains(expectedExceptionString, e); @@ -652,7 +662,7 @@ public Object run() throws Exception { return null; } }); - Assert.fail("expected IOException"); + fail("expected IOException"); } catch (IOException e) { GenericTestUtils.assertExceptionContains("User doesn't have permission", e); } finally { @@ -675,7 +685,7 @@ public Object run() throws Exception { return null; } }); - Assert.fail("expected IOException"); + fail("expected IOException"); } catch (IOException e) { GenericTestUtils.assertExceptionContains("Permission denied", e); } @@ -696,7 +706,7 @@ public Object run() throws Exception { return null; } }); - Assert.fail("expected IOException"); + fail("expected IOException"); } catch (IOException e) { GenericTestUtils.assertExceptionContains("Permission denied", e); } @@ -712,7 +722,7 @@ public Object run() throws Exception { return null; } }); - Assert.fail("expected IOException"); + fail("expected IOException"); } catch (IOException e) { GenericTestUtils.assertExceptionContains("Permission denied", e); } @@ -732,7 +742,7 @@ public Object run() throws Exception { return null; } }); - Assert.fail("expected IOException"); + fail("expected IOException"); } catch (IOException e) { GenericTestUtils.assertExceptionContains("Permission denied", e); } @@ -753,7 +763,8 @@ public Object run() throws Exception { }); } - @Test(timeout = 120000) + @Test + @Timeout(value = 120) public void testRenameFileWithXAttr() throws Exception { FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750)); fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE)); @@ -761,9 +772,9 @@ public void testRenameFileWithXAttr() throws Exception { Path renamePath = new Path(path.toString() + "-rename"); fs.rename(path, renamePath); Map xattrs = fs.getXAttrs(renamePath); - Assert.assertEquals(xattrs.size(), 2); - Assert.assertArrayEquals(value1, xattrs.get(name1)); - Assert.assertArrayEquals(value2, xattrs.get(name2)); + assertEquals(xattrs.size(), 2); + assertArrayEquals(value1, xattrs.get(name1)); + assertArrayEquals(value2, xattrs.get(name2)); fs.removeXAttr(renamePath, name1); fs.removeXAttr(renamePath, name2); } @@ -778,7 +789,8 @@ public void testRenameFileWithXAttr() throws Exception { * Check that execute/scan access to the parent dir is sufficient to get * xattr names. */ - @Test(timeout = 120000) + @Test + @Timeout(value = 120) public void testListXAttrs() throws Exception { final UserGroupInformation user = UserGroupInformation. 
createUserForTesting("user", new String[] {"mygroup"}); @@ -795,7 +807,7 @@ public void testListXAttrs() throws Exception { /* listXAttrs on a path with no XAttrs.*/ final List noXAttrs = fs.listXAttrs(path); - assertTrue("XAttrs were found?", noXAttrs.size() == 0); + assertTrue(noXAttrs.size() == 0, "XAttrs were found?"); fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE)); fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE)); @@ -892,7 +904,8 @@ public Object run() throws Exception { * 6) Restart NN without saving a checkpoint. * 7) Set xattrs again on the same file. */ - @Test(timeout = 120000) + @Test + @Timeout(value = 120) public void testCleanupXAttrs() throws Exception { FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750)); fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE)); @@ -920,12 +933,13 @@ public void testCleanupXAttrs() throws Exception { fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE)); Map xattrs = fs.getXAttrs(path); - Assert.assertEquals(xattrs.size(), 2); - Assert.assertArrayEquals(value1, xattrs.get(name1)); - Assert.assertArrayEquals(value2, xattrs.get(name2)); + assertEquals(xattrs.size(), 2); + assertArrayEquals(value1, xattrs.get(name1)); + assertArrayEquals(value2, xattrs.get(name2)); } - @Test(timeout = 120000) + @Test + @Timeout(value = 120) public void testXAttrAcl() throws Exception { FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750)); fs.setOwner(path, BRUCE.getUserName(), null); @@ -936,7 +950,7 @@ public void testXAttrAcl() throws Exception { Map xattrs; try { xattrs = fsAsDiana.getXAttrs(path); - Assert.fail("Diana should not have read access to get xattrs"); + fail("Diana should not have read access to get xattrs"); } catch (AccessControlException e) { // Ignore } @@ -945,18 +959,18 @@ public void testXAttrAcl() throws Exception { fsAsBruce.modifyAclEntries(path, Lists.newArrayList( aclEntry(ACCESS, USER, DIANA.getUserName(), READ))); xattrs = fsAsDiana.getXAttrs(path); - Assert.assertArrayEquals(value1, xattrs.get(name1)); + assertArrayEquals(value1, xattrs.get(name1)); try { fsAsDiana.removeXAttr(path, name1); - Assert.fail("Diana should not have write access to remove xattrs"); + fail("Diana should not have write access to remove xattrs"); } catch (AccessControlException e) { // Ignore } try { fsAsDiana.setXAttr(path, name2, value2); - Assert.fail("Diana should not have write access to set xattrs"); + fail("Diana should not have write access to set xattrs"); } catch (AccessControlException e) { // Ignore } @@ -964,12 +978,14 @@ public void testXAttrAcl() throws Exception { fsAsBruce.modifyAclEntries(path, Lists.newArrayList( aclEntry(ACCESS, USER, DIANA.getUserName(), ALL))); fsAsDiana.setXAttr(path, name2, value2); - Assert.assertArrayEquals(value2, fsAsDiana.getXAttrs(path).get(name2)); + assertArrayEquals(value2, fsAsDiana.getXAttrs(path).get(name2)); fsAsDiana.removeXAttr(path, name1); fsAsDiana.removeXAttr(path, name2); } - @Test(timeout = 120000) + @Test + @Timeout(value = 120) + @SuppressWarnings("checkstyle:methodlength") public void testRawXAttrs() throws Exception { final UserGroupInformation user = UserGroupInformation. 
createUserForTesting("user", new String[] {"mygroup"}); @@ -981,14 +997,14 @@ public void testRawXAttrs() throws Exception { { // getXAttr final byte[] value = fs.getXAttr(rawPath, raw1); - Assert.assertArrayEquals(value, value1); + assertArrayEquals(value, value1); } { // getXAttrs final Map xattrs = fs.getXAttrs(rawPath); - Assert.assertEquals(xattrs.size(), 1); - Assert.assertArrayEquals(value1, xattrs.get(raw1)); + assertEquals(xattrs.size(), 1); + assertArrayEquals(value1, xattrs.get(raw1)); fs.removeXAttr(rawPath, raw1); } @@ -999,8 +1015,8 @@ public void testRawXAttrs() throws Exception { XAttrSetFlag.REPLACE)); final Map xattrs = fs.getXAttrs(rawPath); - Assert.assertEquals(xattrs.size(), 1); - Assert.assertArrayEquals(newValue1, xattrs.get(raw1)); + assertEquals(xattrs.size(), 1); + assertArrayEquals(newValue1, xattrs.get(raw1)); fs.removeXAttr(rawPath, raw1); } @@ -1223,7 +1239,8 @@ public Object run() throws Exception { * This tests the "unreadable by superuser" xattr which denies access to a * file for the superuser. See HDFS-6705 for details. */ - @Test(timeout = 120000) + @Test + @Timeout(value = 120) public void testUnreadableBySuperuserXAttr() throws Exception { // Run tests as superuser... doTestUnreadableBySuperuserXAttr(fs, true); @@ -1278,7 +1295,7 @@ private void doTUBSXAInt(FileSystem userFs, boolean expectOpenFailure) // Test that the xattr can't be deleted by anyone. try { userFs.removeXAttr(filePath, security1); - Assert.fail("Removing security xattr should fail."); + fail("Removing security xattr should fail."); } catch (AccessControlException e) { GenericTestUtils.assertExceptionContains("The xattr '" + SECURITY_XATTR_UNREADABLE_BY_SUPERUSER + "' can not be deleted.", e); @@ -1313,10 +1330,10 @@ private void doTUBSXAInt(FileSystem userFs, boolean expectOpenFailure) private void verifySecurityXAttrExists(FileSystem userFs) throws Exception { try { final Map xattrs = userFs.getXAttrs(filePath); - Assert.assertEquals(1, xattrs.size()); - Assert.assertNotNull(xattrs.get(security1)); - Assert.assertArrayEquals("expected empty byte[] from getXAttr", - new byte[0], userFs.getXAttr(filePath, security1)); + assertEquals(1, xattrs.size()); + assertNotNull(xattrs.get(security1)); + assertArrayEquals(new byte[0], userFs.getXAttr(filePath, security1), + "expected empty byte[] from getXAttr"); } catch (AccessControlException e) { fail("getXAttrs failed but expected it to succeed"); @@ -1328,9 +1345,9 @@ private void verifyFileAccess(FileSystem userFs, boolean expectOpenFailure) // Test that a file with the xattr can or can't be opened. 
try { userFs.open(filePath).read(); - assertFalse("open succeeded but expected it to fail", expectOpenFailure); + assertFalse(expectOpenFailure, "open succeeded but expected it to fail"); } catch (AccessControlException e) { - assertTrue("open failed but expected it to succeed", expectOpenFailure); + assertTrue(expectOpenFailure, "open failed but expected it to succeed"); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclConfigFlag.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclConfigFlag.java index fb467516791d0..2146dc6ee5aed 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclConfigFlag.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclConfigFlag.java @@ -21,6 +21,8 @@ import static org.apache.hadoop.fs.permission.AclEntryScope.*; import static org.apache.hadoop.fs.permission.AclEntryType.*; import static org.apache.hadoop.fs.permission.FsAction.*; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; @@ -30,10 +32,9 @@ import org.apache.hadoop.hdfs.protocol.AclException; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.util.Lists; -import org.junit.After; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.function.Executable; /** * Tests that the configuration flag that controls support for ACLs is off by @@ -46,10 +47,7 @@ public class TestAclConfigFlag { private MiniDFSCluster cluster; private DistributedFileSystem fs; - @Rule - public ExpectedException exception = ExpectedException.none(); - - @After + @AfterEach public void shutdown() throws Exception { IOUtils.cleanupWithLogger(null, fs); if (cluster != null) { @@ -62,52 +60,58 @@ public void shutdown() throws Exception { public void testModifyAclEntries() throws Exception { initCluster(true, false); fs.mkdirs(PATH); - expectException(); - fs.modifyAclEntries(PATH, Lists.newArrayList( - aclEntry(DEFAULT, USER, "foo", READ_WRITE))); + expectException(() -> { + fs.modifyAclEntries(PATH, Lists.newArrayList( + aclEntry(DEFAULT, USER, "foo", READ_WRITE))); + }); } @Test public void testRemoveAclEntries() throws Exception { initCluster(true, false); fs.mkdirs(PATH); - expectException(); - fs.removeAclEntries(PATH, Lists.newArrayList( - aclEntry(DEFAULT, USER, "foo", READ_WRITE))); + expectException(() -> { + fs.removeAclEntries(PATH, Lists.newArrayList( + aclEntry(DEFAULT, USER, "foo", READ_WRITE))); + }); } @Test public void testRemoveDefaultAcl() throws Exception { initCluster(true, false); fs.mkdirs(PATH); - expectException(); - fs.removeAclEntries(PATH, Lists.newArrayList( - aclEntry(DEFAULT, USER, "foo", READ_WRITE))); + expectException(() -> { + fs.removeAclEntries(PATH, Lists.newArrayList( + aclEntry(DEFAULT, USER, "foo", READ_WRITE))); + }); } @Test public void testRemoveAcl() throws Exception { initCluster(true, false); fs.mkdirs(PATH); - expectException(); - fs.removeAcl(PATH); + expectException(() -> { + fs.removeAcl(PATH); + }); } @Test public void testSetAcl() throws Exception { initCluster(true, false); fs.mkdirs(PATH); - expectException(); - fs.setAcl(PATH, Lists.newArrayList( - aclEntry(DEFAULT, USER, 
"foo", READ_WRITE))); + expectException(() -> { + fs.setAcl(PATH, Lists.newArrayList( + aclEntry(DEFAULT, USER, "foo", READ_WRITE))); + }); } @Test public void testGetAclStatus() throws Exception { initCluster(true, false); fs.mkdirs(PATH); - expectException(); - fs.getAclStatus(PATH); + expectException(() -> { + fs.getAclStatus(PATH); + }); } @Test @@ -141,9 +145,9 @@ public void testFsImage() throws Exception { * We expect an AclException, and we want the exception text to state the * configuration key that controls ACL support. */ - private void expectException() { - exception.expect(AclException.class); - exception.expectMessage(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY); + private void expectException(Executable exec) { + AclException ex = assertThrows(AclException.class, exec); + assertTrue(ex.getMessage().contains(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY)); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclTransformation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclTransformation.java index 91c1493454a57..07e466543cde2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclTransformation.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclTransformation.java @@ -22,7 +22,8 @@ import static org.apache.hadoop.fs.permission.FsAction.*; import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*; import static org.apache.hadoop.hdfs.server.namenode.AclTransformation.*; -import static org.junit.Assert.*; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; import java.util.List; @@ -33,7 +34,7 @@ import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; -import org.junit.Test; +import org.junit.jupiter.api.Test; /** * Tests operations that modify ACLs. 
All tests in this suite have been @@ -309,58 +310,66 @@ public void testFilterAclEntriesByAclSpecEmptyAclSpec() throws AclException { assertEquals(existing, filterAclEntriesByAclSpec(existing, aclSpec)); } - @Test(expected=AclException.class) + @Test public void testFilterAclEntriesByAclSpecRemoveAccessMaskRequired() throws AclException { - List existing = new ImmutableList.Builder() - .add(aclEntry(ACCESS, USER, ALL)) - .add(aclEntry(ACCESS, USER, "bruce", READ)) - .add(aclEntry(ACCESS, GROUP, READ)) - .add(aclEntry(ACCESS, MASK, ALL)) - .add(aclEntry(ACCESS, OTHER, NONE)) - .build(); - List aclSpec = Lists.newArrayList( - aclEntry(ACCESS, MASK)); - filterAclEntriesByAclSpec(existing, aclSpec); + assertThrows(AclException.class, () -> { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, USER, "bruce", READ)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, MASK, ALL)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, MASK)); + filterAclEntriesByAclSpec(existing, aclSpec); + }); } - @Test(expected=AclException.class) + @Test public void testFilterAclEntriesByAclSpecRemoveDefaultMaskRequired() throws AclException { - List existing = new ImmutableList.Builder() - .add(aclEntry(ACCESS, USER, ALL)) - .add(aclEntry(ACCESS, GROUP, READ)) - .add(aclEntry(ACCESS, OTHER, NONE)) - .add(aclEntry(DEFAULT, USER, ALL)) - .add(aclEntry(DEFAULT, USER, "bruce", READ)) - .add(aclEntry(DEFAULT, GROUP, READ)) - .add(aclEntry(DEFAULT, MASK, ALL)) - .add(aclEntry(DEFAULT, OTHER, NONE)) - .build(); - List aclSpec = Lists.newArrayList( - aclEntry(DEFAULT, MASK)); - filterAclEntriesByAclSpec(existing, aclSpec); + assertThrows(AclException.class, () -> { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .add(aclEntry(DEFAULT, USER, ALL)) + .add(aclEntry(DEFAULT, USER, "bruce", READ)) + .add(aclEntry(DEFAULT, GROUP, READ)) + .add(aclEntry(DEFAULT, MASK, ALL)) + .add(aclEntry(DEFAULT, OTHER, NONE)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(DEFAULT, MASK)); + filterAclEntriesByAclSpec(existing, aclSpec); + }); } - @Test(expected=AclException.class) + @Test public void testFilterAclEntriesByAclSpecInputTooLarge() throws AclException { - List existing = new ImmutableList.Builder() - .add(aclEntry(ACCESS, USER, ALL)) - .add(aclEntry(ACCESS, GROUP, READ)) - .add(aclEntry(ACCESS, OTHER, NONE)) - .build(); - filterAclEntriesByAclSpec(existing, ACL_SPEC_TOO_LARGE); + assertThrows(AclException.class, () -> { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .build(); + filterAclEntriesByAclSpec(existing, ACL_SPEC_TOO_LARGE); + }); } - @Test(expected = AclException.class) + @Test public void testFilterDefaultAclEntriesByAclSpecInputTooLarge() throws AclException { - List existing = new ImmutableList.Builder() - .add(aclEntry(DEFAULT, USER, ALL)) - .add(aclEntry(DEFAULT, GROUP, READ)) - .add(aclEntry(DEFAULT, OTHER, NONE)) - .build(); - filterAclEntriesByAclSpec(existing, ACL_SPEC_DEFAULT_TOO_LARGE); + assertThrows(AclException.class, () -> { + List existing = new ImmutableList.Builder() + .add(aclEntry(DEFAULT, USER, ALL)) + .add(aclEntry(DEFAULT, GROUP, READ)) + .add(aclEntry(DEFAULT, OTHER, NONE)) + .build(); + filterAclEntriesByAclSpec(existing, 
ACL_SPEC_DEFAULT_TOO_LARGE); + }); } @Test @@ -721,99 +730,113 @@ public void testMergeAclEntriesEmptyAclSpec() throws AclException { assertEquals(existing, mergeAclEntries(existing, aclSpec)); } - @Test(expected=AclException.class) + @Test public void testMergeAclEntriesInputTooLarge() throws AclException { - List existing = new ImmutableList.Builder() - .add(aclEntry(ACCESS, USER, ALL)) - .add(aclEntry(ACCESS, GROUP, READ)) - .add(aclEntry(ACCESS, OTHER, NONE)) - .build(); - mergeAclEntries(existing, ACL_SPEC_TOO_LARGE); + assertThrows(AclException.class, () -> { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .build(); + mergeAclEntries(existing, ACL_SPEC_TOO_LARGE); + }); } - @Test(expected=AclException.class) + @Test public void testMergeAclDefaultEntriesInputTooLarge() throws AclException { - List existing = new ImmutableList.Builder() - .add(aclEntry(DEFAULT, USER, ALL)) - .add(aclEntry(DEFAULT, GROUP, READ)) - .add(aclEntry(DEFAULT, OTHER, NONE)) - .build(); - mergeAclEntries(existing, ACL_SPEC_DEFAULT_TOO_LARGE); + assertThrows(AclException.class, () -> { + List existing = new ImmutableList.Builder() + .add(aclEntry(DEFAULT, USER, ALL)) + .add(aclEntry(DEFAULT, GROUP, READ)) + .add(aclEntry(DEFAULT, OTHER, NONE)) + .build(); + mergeAclEntries(existing, ACL_SPEC_DEFAULT_TOO_LARGE); + }); } - @Test(expected=AclException.class) + @Test public void testMergeAclEntriesResultTooLarge() throws AclException { - ImmutableList.Builder aclBuilder = - new ImmutableList.Builder() - .add(aclEntry(ACCESS, USER, ALL)); - for (int i = 1; i <= 28; ++i) { - aclBuilder.add(aclEntry(ACCESS, USER, "user" + i, READ)); - } - aclBuilder - .add(aclEntry(ACCESS, GROUP, READ)) - .add(aclEntry(ACCESS, MASK, READ)) - .add(aclEntry(ACCESS, OTHER, NONE)); - List existing = aclBuilder.build(); - List aclSpec = Lists.newArrayList( - aclEntry(ACCESS, USER, "bruce", READ)); - mergeAclEntries(existing, aclSpec); + assertThrows(AclException.class, () -> { + ImmutableList.Builder aclBuilder = + new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)); + for (int i = 1; i <= 28; ++i) { + aclBuilder.add(aclEntry(ACCESS, USER, "user" + i, READ)); + } + aclBuilder + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, MASK, READ)) + .add(aclEntry(ACCESS, OTHER, NONE)); + List existing = aclBuilder.build(); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, "bruce", READ)); + mergeAclEntries(existing, aclSpec); + }); } - @Test(expected = AclException.class) + @Test public void testMergeAclDefaultEntriesResultTooLarge() throws AclException { - ImmutableList.Builder aclBuilder = - new ImmutableList.Builder() - .add(aclEntry(DEFAULT, USER, ALL)); - for (int i = 1; i <= 28; ++i) { - aclBuilder.add(aclEntry(DEFAULT, USER, "user" + i, READ)); - } - aclBuilder - .add(aclEntry(DEFAULT, GROUP, READ)) - .add(aclEntry(DEFAULT, MASK, READ)) - .add(aclEntry(DEFAULT, OTHER, NONE)); - List existing = aclBuilder.build(); - List aclSpec = Lists.newArrayList( - aclEntry(DEFAULT, USER, "bruce", READ)); - mergeAclEntries(existing, aclSpec); + assertThrows(AclException.class, () -> { + ImmutableList.Builder aclBuilder = + new ImmutableList.Builder() + .add(aclEntry(DEFAULT, USER, ALL)); + for (int i = 1; i <= 28; ++i) { + aclBuilder.add(aclEntry(DEFAULT, USER, "user" + i, READ)); + } + aclBuilder + .add(aclEntry(DEFAULT, GROUP, READ)) + .add(aclEntry(DEFAULT, MASK, READ)) + .add(aclEntry(DEFAULT, OTHER, 
NONE)); + List existing = aclBuilder.build(); + List aclSpec = Lists.newArrayList( + aclEntry(DEFAULT, USER, "bruce", READ)); + mergeAclEntries(existing, aclSpec); + }); } - @Test(expected=AclException.class) + @Test public void testMergeAclEntriesDuplicateEntries() throws AclException { - List existing = new ImmutableList.Builder() - .add(aclEntry(ACCESS, USER, ALL)) - .add(aclEntry(ACCESS, GROUP, READ)) - .add(aclEntry(ACCESS, OTHER, NONE)) - .build(); - List aclSpec = Lists.newArrayList( - aclEntry(ACCESS, USER, "bruce", ALL), - aclEntry(ACCESS, USER, "diana", READ_WRITE), - aclEntry(ACCESS, USER, "clark", READ), - aclEntry(ACCESS, USER, "bruce", READ_EXECUTE)); - mergeAclEntries(existing, aclSpec); + assertThrows(AclException.class, () -> { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, "bruce", ALL), + aclEntry(ACCESS, USER, "diana", READ_WRITE), + aclEntry(ACCESS, USER, "clark", READ), + aclEntry(ACCESS, USER, "bruce", READ_EXECUTE)); + mergeAclEntries(existing, aclSpec); + }); } - @Test(expected=AclException.class) + @Test public void testMergeAclEntriesNamedMask() throws AclException { - List existing = new ImmutableList.Builder() - .add(aclEntry(ACCESS, USER, ALL)) - .add(aclEntry(ACCESS, GROUP, READ)) - .add(aclEntry(ACCESS, OTHER, NONE)) - .build(); - List aclSpec = Lists.newArrayList( - aclEntry(ACCESS, MASK, "bruce", READ_EXECUTE)); - mergeAclEntries(existing, aclSpec); + assertThrows(AclException.class, () -> { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, MASK, "bruce", READ_EXECUTE)); + mergeAclEntries(existing, aclSpec); + }); } - @Test(expected=AclException.class) + @Test public void testMergeAclEntriesNamedOther() throws AclException { - List existing = new ImmutableList.Builder() - .add(aclEntry(ACCESS, USER, ALL)) - .add(aclEntry(ACCESS, GROUP, READ)) - .add(aclEntry(ACCESS, OTHER, NONE)) - .build(); - List aclSpec = Lists.newArrayList( - aclEntry(ACCESS, OTHER, "bruce", READ_EXECUTE)); - mergeAclEntries(existing, aclSpec); + assertThrows(AclException.class, () -> { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, OTHER, "bruce", READ_EXECUTE)); + mergeAclEntries(existing, aclSpec); + }); } @Test @@ -1121,138 +1144,156 @@ public void testReplaceAclEntriesOnlyDefaults() throws AclException { assertEquals(expected, replaceAclEntries(existing, aclSpec)); } - @Test(expected=AclException.class) + @Test public void testReplaceAclEntriesInputTooLarge() throws AclException { - List existing = new ImmutableList.Builder() - .add(aclEntry(ACCESS, USER, ALL)) - .add(aclEntry(ACCESS, GROUP, READ)) - .add(aclEntry(ACCESS, OTHER, NONE)) - .build(); - replaceAclEntries(existing, ACL_SPEC_TOO_LARGE); + assertThrows(AclException.class, () -> { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .build(); + replaceAclEntries(existing, ACL_SPEC_TOO_LARGE); + }); } - @Test(expected=AclException.class) + @Test public void 
testReplaceAclDefaultEntriesInputTooLarge() throws AclException { - List existing = new ImmutableList.Builder() - .add(aclEntry(DEFAULT, USER, ALL)) - .add(aclEntry(DEFAULT, GROUP, READ)) - .add(aclEntry(DEFAULT, OTHER, NONE)) - .build(); - replaceAclEntries(existing, ACL_SPEC_DEFAULT_TOO_LARGE); + assertThrows(AclException.class, () -> { + List existing = new ImmutableList.Builder() + .add(aclEntry(DEFAULT, USER, ALL)) + .add(aclEntry(DEFAULT, GROUP, READ)) + .add(aclEntry(DEFAULT, OTHER, NONE)) + .build(); + replaceAclEntries(existing, ACL_SPEC_DEFAULT_TOO_LARGE); + }); } - @Test(expected=AclException.class) + @Test public void testReplaceAclEntriesResultTooLarge() throws AclException { - List existing = new ImmutableList.Builder() - .add(aclEntry(ACCESS, USER, ALL)) - .add(aclEntry(ACCESS, GROUP, READ)) - .add(aclEntry(ACCESS, OTHER, NONE)) - .build(); - List aclSpec = Lists.newArrayListWithCapacity(32); - aclSpec.add(aclEntry(ACCESS, USER, ALL)); - for (int i = 1; i <= 29; ++i) { - aclSpec.add(aclEntry(ACCESS, USER, "user" + i, READ)); - } - aclSpec.add(aclEntry(ACCESS, GROUP, READ)); - aclSpec.add(aclEntry(ACCESS, OTHER, NONE)); + assertThrows(AclException.class, () -> { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .build(); + List aclSpec = Lists.newArrayListWithCapacity(32); + aclSpec.add(aclEntry(ACCESS, USER, ALL)); + for (int i = 1; i <= 29; ++i) { + aclSpec.add(aclEntry(ACCESS, USER, "user" + i, READ)); + } + aclSpec.add(aclEntry(ACCESS, GROUP, READ)); + aclSpec.add(aclEntry(ACCESS, OTHER, NONE)); + replaceAclEntries(existing, aclSpec); + }); // The ACL spec now has 32 entries. Automatic mask calculation will push it - // over the limit to 33. 
- replaceAclEntries(existing, aclSpec); + } - @Test(expected=AclException.class) + @Test public void testReplaceAclEntriesDuplicateEntries() throws AclException { - List existing = new ImmutableList.Builder() - .add(aclEntry(ACCESS, USER, ALL)) - .add(aclEntry(ACCESS, GROUP, READ)) - .add(aclEntry(ACCESS, OTHER, NONE)) - .build(); - List aclSpec = Lists.newArrayList( - aclEntry(ACCESS, USER, ALL), - aclEntry(ACCESS, USER, "bruce", ALL), - aclEntry(ACCESS, USER, "diana", READ_WRITE), - aclEntry(ACCESS, USER, "clark", READ), - aclEntry(ACCESS, USER, "bruce", READ_EXECUTE), - aclEntry(ACCESS, GROUP, READ), - aclEntry(ACCESS, OTHER, NONE)); - replaceAclEntries(existing, aclSpec); + assertThrows(AclException.class, () -> { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, USER, "bruce", ALL), + aclEntry(ACCESS, USER, "diana", READ_WRITE), + aclEntry(ACCESS, USER, "clark", READ), + aclEntry(ACCESS, USER, "bruce", READ_EXECUTE), + aclEntry(ACCESS, GROUP, READ), + aclEntry(ACCESS, OTHER, NONE)); + replaceAclEntries(existing, aclSpec); + }); } - @Test(expected=AclException.class) + @Test public void testReplaceAclEntriesNamedMask() throws AclException { - List existing = new ImmutableList.Builder() - .add(aclEntry(ACCESS, USER, ALL)) - .add(aclEntry(ACCESS, GROUP, READ)) - .add(aclEntry(ACCESS, OTHER, NONE)) - .build(); - List aclSpec = Lists.newArrayList( - aclEntry(ACCESS, USER, ALL), - aclEntry(ACCESS, GROUP, READ), - aclEntry(ACCESS, OTHER, NONE), - aclEntry(ACCESS, MASK, "bruce", READ_EXECUTE)); - replaceAclEntries(existing, aclSpec); + assertThrows(AclException.class, () -> { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, GROUP, READ), + aclEntry(ACCESS, OTHER, NONE), + aclEntry(ACCESS, MASK, "bruce", READ_EXECUTE)); + replaceAclEntries(existing, aclSpec); + }); } - @Test(expected=AclException.class) + @Test public void testReplaceAclEntriesNamedOther() throws AclException { - List existing = new ImmutableList.Builder() - .add(aclEntry(ACCESS, USER, ALL)) - .add(aclEntry(ACCESS, GROUP, READ)) - .add(aclEntry(ACCESS, OTHER, NONE)) - .build(); - List aclSpec = Lists.newArrayList( - aclEntry(ACCESS, USER, ALL), - aclEntry(ACCESS, GROUP, READ), - aclEntry(ACCESS, OTHER, NONE), - aclEntry(ACCESS, OTHER, "bruce", READ_EXECUTE)); - replaceAclEntries(existing, aclSpec); + assertThrows(AclException.class, () -> { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, GROUP, READ), + aclEntry(ACCESS, OTHER, NONE), + aclEntry(ACCESS, OTHER, "bruce", READ_EXECUTE)); + replaceAclEntries(existing, aclSpec); + }); } - @Test(expected=AclException.class) + @Test public void testReplaceAclEntriesMissingUser() throws AclException { - List existing = new ImmutableList.Builder() - .add(aclEntry(ACCESS, USER, ALL)) - .add(aclEntry(ACCESS, GROUP, READ)) - .add(aclEntry(ACCESS, OTHER, NONE)) - .build(); - List aclSpec = Lists.newArrayList( - aclEntry(ACCESS, USER, "bruce", READ_WRITE), - aclEntry(ACCESS, 
GROUP, READ_EXECUTE), - aclEntry(ACCESS, GROUP, "sales", ALL), - aclEntry(ACCESS, MASK, ALL), - aclEntry(ACCESS, OTHER, NONE)); - replaceAclEntries(existing, aclSpec); + assertThrows(AclException.class, () -> { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, "bruce", READ_WRITE), + aclEntry(ACCESS, GROUP, READ_EXECUTE), + aclEntry(ACCESS, GROUP, "sales", ALL), + aclEntry(ACCESS, MASK, ALL), + aclEntry(ACCESS, OTHER, NONE)); + replaceAclEntries(existing, aclSpec); + }); } - @Test(expected=AclException.class) + @Test public void testReplaceAclEntriesMissingGroup() throws AclException { - List existing = new ImmutableList.Builder() - .add(aclEntry(ACCESS, USER, ALL)) - .add(aclEntry(ACCESS, GROUP, READ)) - .add(aclEntry(ACCESS, OTHER, NONE)) - .build(); - List aclSpec = Lists.newArrayList( - aclEntry(ACCESS, USER, ALL), - aclEntry(ACCESS, USER, "bruce", READ_WRITE), - aclEntry(ACCESS, GROUP, "sales", ALL), - aclEntry(ACCESS, MASK, ALL), - aclEntry(ACCESS, OTHER, NONE)); - replaceAclEntries(existing, aclSpec); + assertThrows(AclException.class, () -> { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, USER, "bruce", READ_WRITE), + aclEntry(ACCESS, GROUP, "sales", ALL), + aclEntry(ACCESS, MASK, ALL), + aclEntry(ACCESS, OTHER, NONE)); + replaceAclEntries(existing, aclSpec); + }); } - @Test(expected=AclException.class) + @Test public void testReplaceAclEntriesMissingOther() throws AclException { - List existing = new ImmutableList.Builder() - .add(aclEntry(ACCESS, USER, ALL)) - .add(aclEntry(ACCESS, GROUP, READ)) - .add(aclEntry(ACCESS, OTHER, NONE)) - .build(); - List aclSpec = Lists.newArrayList( - aclEntry(ACCESS, USER, ALL), - aclEntry(ACCESS, USER, "bruce", READ_WRITE), - aclEntry(ACCESS, GROUP, READ_EXECUTE), - aclEntry(ACCESS, GROUP, "sales", ALL), - aclEntry(ACCESS, MASK, ALL)); - replaceAclEntries(existing, aclSpec); + assertThrows(AclException.class, () -> { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, USER, "bruce", READ_WRITE), + aclEntry(ACCESS, GROUP, READ_EXECUTE), + aclEntry(ACCESS, GROUP, "sales", ALL), + aclEntry(ACCESS, MASK, ALL)); + replaceAclEntries(existing, aclSpec); + }); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java index cfed8d23ce381..6278768636019 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java @@ -21,9 +21,9 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY; -import static org.junit.Assert.assertNotNull; -import 
static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import java.io.File; import java.io.IOException; @@ -42,9 +42,9 @@ import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil; import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.StringUtils; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; /** * Startup and format tests @@ -59,7 +59,7 @@ public class TestAllowFormat { private static Configuration config; private static MiniDFSCluster cluster = null; - @BeforeClass + @BeforeAll public static void setUp() throws Exception { config = new Configuration(); if ( DFS_BASE_DIR.exists() && !FileUtil.fullyDelete(DFS_BASE_DIR) ) { @@ -90,7 +90,7 @@ public static void setUp() throws Exception { /** * clean up */ - @AfterClass + @AfterAll public static void tearDown() throws Exception { if (cluster!=null) { cluster.shutdown(); @@ -136,9 +136,10 @@ public void testAllowFormat() throws IOException { fail("Format succeeded, when it should have failed"); } catch (IOException e) { // expected to fail // Verify we got message we expected - assertTrue("Exception was not about formatting Namenode", + assertTrue( e.getMessage().startsWith("The option " + - DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY)); + DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY), + "Exception was not about formatting Namenode"); LOG.info("Expected failure: " + StringUtils.stringifyException(e)); LOG.info("Done verifying format will fail with allowformat false"); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java index 8481b5753f8cf..fbaca9442742f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java @@ -17,11 +17,11 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import java.io.File; import java.io.IOException; @@ -55,8 +55,8 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Lists; import org.slf4j.event.Level; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; @@ -77,7 +77,7 @@ public class TestBackupNode { static final int blockSize = 4096; static final int fileSize = 8192; - @Before + @BeforeEach public void setUp() throws Exception { File baseDir = new 
File(BASE_DIR); if(baseDir.exists()) @@ -110,10 +110,9 @@ BackupNode startBackupNode(Configuration conf, BackupNode bn = (BackupNode)NameNode.createNameNode( new String[]{startupOpt.getName()}, c); - assertTrue(bn.getRole() + " must be in SafeMode.", bn.isInSafeMode()); - assertTrue(bn.getRole() + " must be in StandbyState", - bn.getNamesystem().getHAState() - .equalsIgnoreCase(HAServiceState.STANDBY.name())); + assertTrue(bn.isInSafeMode(), bn.getRole() + " must be in SafeMode."); + assertTrue(bn.getNamesystem().getHAState().equalsIgnoreCase(HAServiceState.STANDBY.name()), + bn.getRole() + " must be in StandbyState"); return bn; } @@ -182,8 +181,7 @@ public void startBackupNodeWithIncorrectAuthentication() throws IOException { try { bn = (BackupNode)NameNode.createNameNode( new String[] {startupOpt.getName()}, c); - assertTrue("Namesystem in BackupNode should be null", - bn.getNamesystem() == null); + assertTrue(bn.getNamesystem() == null, "Namesystem in BackupNode should be null"); fail("Incorrect authentication setting should throw IOException"); } catch (IOException e) { LOG.info("IOException thrown.", e); @@ -248,9 +246,8 @@ public void testBackupNodeTailsEdits() throws Exception { long nnImageAfter = nn.getFSImage().getStorage().getMostRecentCheckpointTxId(); - assertTrue("nn should have received new checkpoint. before: " + - nnImageBefore + " after: " + nnImageAfter, - nnImageAfter > nnImageBefore); + assertTrue(nnImageAfter > nnImageBefore, "nn should have received new checkpoint. before: " + + nnImageBefore + " after: " + nnImageAfter); // BN should stay in sync after checkpoint testBNInSync(cluster, backup, 3); @@ -263,10 +260,8 @@ public void testBackupNodeTailsEdits() throws Exception { // When shutting down the BN, it shouldn't finalize logs that are // still open on the NN EditLogFile editsLog = FSImageTestUtil.findLatestEditsLog(sd); - assertEquals(editsLog.getFirstTxId(), - nn.getFSImage().getEditLog().getCurSegmentTxId()); - assertTrue("Should not have finalized " + editsLog, - editsLog.isInProgress()); + assertEquals(editsLog.getFirstTxId(), nn.getFSImage().getEditLog().getCurSegmentTxId()); + assertTrue(editsLog.isInProgress(), "Should not have finalized " + editsLog); // do some edits assertTrue(fileSys.mkdirs(new Path("/edit-while-bn-down"))); @@ -388,7 +383,7 @@ void testCheckpoint(StartupOption op) throws Exception { waitCheckpointDone(cluster, txid); } catch(IOException e) { LOG.error("Error in TestBackupNode:", e); - assertTrue(e.getLocalizedMessage(), false); + assertTrue(false, e.getLocalizedMessage()); } finally { if(backup != null) backup.stop(); if(fileSys != null) fileSys.close(); @@ -447,7 +442,7 @@ void testCheckpoint(StartupOption op) throws Exception { LOG.info("Write to " + backup.getRole() + " failed as expected: ", eio); canWrite = false; } - assertFalse("Write to BackupNode must be prohibited.", canWrite); + assertFalse(canWrite, "Write to BackupNode must be prohibited."); // Reads are allowed for BackupNode, but not for CheckpointNode boolean canRead = true; @@ -457,18 +452,18 @@ void testCheckpoint(StartupOption op) throws Exception { LOG.info("Read from " + backup.getRole() + " failed: ", eio); canRead = false; } - assertEquals("Reads to BackupNode are allowed, but not CheckpointNode.", - canRead, backup.isRole(NamenodeRole.BACKUP)); + assertEquals(canRead, backup.isRole(NamenodeRole.BACKUP), + "Reads to BackupNode are allowed, but not CheckpointNode."); DFSTestUtil.createFile(fileSys, file3, fileSize, fileSize, blockSize, replication, seed); 
TestCheckpoint.checkFile(fileSys, file3, replication); // should also be on BN right away - assertTrue("file3 does not exist on BackupNode", - op != StartupOption.BACKUP || + assertTrue(op != StartupOption.BACKUP || backup.getNamesystem().getFileInfo( - file3.toUri().getPath(), false, false, false) != null); + file3.toUri().getPath(), false, false, false) != null, + "file3 does not exist on BackupNode"); } catch(IOException e) { LOG.error("Error in TestBackupNode:", e); @@ -496,7 +491,7 @@ void testCheckpoint(StartupOption op) throws Exception { assertTrue(fileSys.exists(file2)); } catch(IOException e) { LOG.error("Error in TestBackupNode: ", e); - assertTrue(e.getLocalizedMessage(), false); + assertTrue(false, e.getLocalizedMessage()); } finally { fileSys.close(); if (cluster != null) { @@ -546,11 +541,10 @@ public void testCanReadData() throws IOException { new Path("hdfs://" + bnAddr).toUri(), conf); String nnData = DFSTestUtil.readFile(fileSys, file1); String bnData = DFSTestUtil.readFile(bnFS, file1); - assertEquals("Data read from BackupNode and NameNode is not the same.", - nnData, bnData); + assertEquals(nnData, bnData, "Data read from BackupNode and NameNode is not the same."); } catch(IOException e) { LOG.error("Error in TestBackupNode: ", e); - assertTrue(e.getLocalizedMessage(), false); + assertTrue(false, e.getLocalizedMessage()); } finally { if(fileSys != null) fileSys.close(); if(backup != null) backup.stop(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java index 9731f27a002ac..d4f3b70948175 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import java.io.IOException; @@ -36,7 +36,7 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; -import org.junit.Test; +import org.junit.jupiter.api.Test; /** * This class tests the creation and validation of a checkpoint. 
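(Aside, not part of the patch: the hunks above apply two mechanical rules. Lifecycle annotations map one-to-one, with @Before/@After/@BeforeClass/@AfterClass becoming @BeforeEach/@AfterEach/@BeforeAll/@AfterAll, and the optional failure message, which org.junit.Assert accepts as the first argument, becomes the last argument of the org.junit.jupiter.api.Assertions overloads. A minimal hypothetical sketch of both rules, not taken from the Hadoop sources:)

```java
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;

import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

class AssertionOrderSketch {

  private int replication;

  @BeforeEach  // JUnit 4 equivalent: @Before
  void setUp() {
    replication = 3;
  }

  @Test
  void messageMovesToLastParameter() {
    // JUnit 4: assertEquals("replication for /foo", 3, replication);
    // JUnit 5: same assertion, with the failure message as the final argument.
    assertEquals(3, replication, "replication for /foo");
    assertTrue(replication > 0, "replication must be positive");
  }
}
```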
@@ -90,8 +90,7 @@ public void testSaveNamespace() throws IOException { assertTrue(log.isInProgress()); log.scanLog(Long.MAX_VALUE, true); long numTransactions = (log.getLastTxId() - log.getFirstTxId()) + 1; - assertEquals("In-progress log " + log + " should have 5 transactions", - 5, numTransactions); + assertEquals(5, numTransactions, "In-progress log " + log + " should have 5 transactions"); } // Saving image in safe mode should succeed @@ -107,8 +106,7 @@ public void testSaveNamespace() throws IOException { assertTrue(log.isInProgress()); log.scanLog(Long.MAX_VALUE, true); long numTransactions = (log.getLastTxId() - log.getFirstTxId()) + 1; - assertEquals("In-progress log " + log + " should only have START txn", - 1, numTransactions); + assertEquals(1, numTransactions, "In-progress log " + log + " should only have START txn"); } // restart cluster diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java index d675dcda988b2..73a4a52611b44 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java @@ -24,13 +24,13 @@ import static org.apache.hadoop.test.MetricsAsserts.assertCounterGt; import static org.apache.hadoop.test.MetricsAsserts.assertGaugeGt; import static org.apache.hadoop.test.MetricsAsserts.getMetrics; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertSame; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertSame; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import java.io.File; import java.io.FileOutputStream; @@ -89,9 +89,10 @@ import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.StringUtils; import org.slf4j.event.Level; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; @@ -128,14 +129,14 @@ public boolean accept(File dir, String name) { private CheckpointFaultInjector faultInjector; - @Before + @BeforeEach public void setUp() { FileUtil.fullyDeleteContents(new File(MiniDFSCluster.getBaseDirectory())); faultInjector = Mockito.mock(CheckpointFaultInjector.class); CheckpointFaultInjector.instance = faultInjector; } - @After + @AfterEach public void checkForSNNThreads() { GenericTestUtils.assertNoThreadsMatching(".*SecondaryNameNode.*"); } @@ -144,7 +145,7 @@ static void checkFile(FileSystem fileSys, Path name, int repl) throws IOException { assertTrue(fileSys.exists(name)); int replication = fileSys.getFileStatus(name).getReplication(); - assertEquals("replication for " + name, repl, 
replication); + assertEquals(repl, replication, "replication for " + name); //We should probably test for more of the file properties. } @@ -201,36 +202,36 @@ public void testWriteTransactionIdHandlesIOE() throws Exception { ArrayList editsDirs = new ArrayList(); File filePath = new File(PathUtils.getTestDir(getClass()), "storageDirToCheck"); - assertTrue("Couldn't create directory storageDirToCheck", - filePath.exists() || filePath.mkdirs()); + assertTrue(filePath.exists() || filePath.mkdirs(), + "Couldn't create directory storageDirToCheck"); fsImageDirs.add(filePath.toURI()); editsDirs.add(filePath.toURI()); NNStorage nnStorage = new NNStorage(new HdfsConfiguration(), fsImageDirs, editsDirs); try { - assertTrue("List of storage directories didn't have storageDirToCheck.", - nnStorage.getEditsDirectories().iterator().next(). - toString().indexOf("storageDirToCheck") != -1); - assertTrue("List of removed storage directories wasn't empty", - nnStorage.getRemovedStorageDirs().isEmpty()); + assertTrue(nnStorage.getEditsDirectories().iterator().next().toString() + .indexOf("storageDirToCheck") != -1, + "List of storage directories didn't have storageDirToCheck."); + assertTrue(nnStorage.getRemovedStorageDirs().isEmpty(), + "List of removed storage directories wasn't empty"); } finally { // Delete storage directory to cause IOException in writeTransactionIdFile - assertTrue("Couldn't remove directory " + filePath.getAbsolutePath(), - filePath.delete()); + assertTrue(filePath.delete(), "Couldn't remove directory " + filePath.getAbsolutePath()); } // Just call writeTransactionIdFile using any random number nnStorage.writeTransactionIdFileToStorage(1); List listRsd = nnStorage.getRemovedStorageDirs(); - assertTrue("Removed directory wasn't what was expected", - listRsd.size() > 0 && listRsd.get(listRsd.size() - 1).getRoot(). - toString().indexOf("storageDirToCheck") != -1); + assertTrue(listRsd.size() > 0 + && listRsd.get(listRsd.size() - 1).getRoot().toString().indexOf("storageDirToCheck") != -1, + "Removed directory wasn't what was expected"); nnStorage.close(); } /* * Simulate exception during edit replay. */ - @Test(timeout=30000) + @Test + @Timeout(value = 30) public void testReloadOnEditReplayFailure () throws IOException { Configuration conf = new HdfsConfiguration(); FSDataOutputStream fos = null; @@ -267,8 +268,7 @@ public void testReloadOnEditReplayFailure () throws IOException { fos.write(new byte[] { 0, 1, 2, 3 }); fos.hsync(); - assertTrue("Another checkpoint should have reloaded image", - secondary.doCheckpoint()); + assertTrue(secondary.doCheckpoint(), "Another checkpoint should have reloaded image"); } finally { if (fs != null) { fs.close(); @@ -284,7 +284,8 @@ public void testReloadOnEditReplayFailure () throws IOException { /* * Simulate 2NN exit due to too many merge failures. 
*/ - @Test(timeout=30000) + @Test + @Timeout(value = 30) public void testTooManyEditReplayFailures() throws IOException { Configuration conf = new HdfsConfiguration(); conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_MAX_RETRIES_KEY, 1); @@ -315,7 +316,7 @@ public void testTooManyEditReplayFailures() throws IOException { } catch (ExitException ee) { // ignore ExitUtil.resetFirstExitException(); - assertEquals("Max retries", 1, secondary.getMergeErrorCount() - 1); + assertEquals(1, secondary.getMergeErrorCount() - 1, "Max retries"); } finally { if (fs != null) { fs.close(); @@ -830,8 +831,8 @@ public void testSecondaryNameNodeLocking() throws Exception { savedSd.lock(); try { secondary = startSecondaryNameNode(conf); - assertFalse("Should fail to start 2NN when " + savedSd + " is locked", - savedSd.isLockSupported()); + assertFalse(savedSd.isLockSupported(), + "Should fail to start 2NN when " + savedSd + " is locked"); } catch (IOException ioe) { GenericTestUtils.assertExceptionContains("already locked", ioe); } finally { @@ -876,8 +877,8 @@ public void testStorageAlreadyLockedErrorMessage() throws Exception { " " + ManagementFactory.getRuntimeMXBean().getName(); String expectedLogMessage = "It appears that another node " + lockingJvmName + " has already locked the storage directory"; - assertTrue("Log output does not contain expected log message: " - + expectedLogMessage, logs.getOutput().contains(expectedLogMessage)); + assertTrue(logs.getOutput().contains(expectedLogMessage), + "Log output does not contain expected log message: " + expectedLogMessage); } } finally { cleanup(cluster); @@ -913,8 +914,8 @@ private static void assertClusterStartFailsWhenDirLocked( try { cluster = new MiniDFSCluster.Builder(conf).format(false) .manageNameDfsDirs(false).numDataNodes(0).build(); - assertFalse("cluster should fail to start after locking " + - sdToLock, sdToLock.isLockSupported()); + assertFalse(sdToLock.isLockSupported(), + "cluster should fail to start after locking " + sdToLock); } catch (IOException ioe) { GenericTestUtils.assertExceptionContains("already locked", ioe); } finally { @@ -983,8 +984,8 @@ public void testImportCheckpoint() throws Exception { cluster = new MiniDFSCluster.Builder(conf).format(false).numDataNodes(0) .startupOption(StartupOption.IMPORT).build(); - assertTrue("Path from checkpoint should exist after import", - cluster.getFileSystem().exists(testPath)); + assertTrue(cluster.getFileSystem().exists(testPath), + "Path from checkpoint should exist after import"); // Make sure that the image got saved on import FSImageTestUtil.assertNNHasCheckpoints(cluster, Ints.asList(3)); @@ -1221,8 +1222,7 @@ public void testSaveNamespace() throws IOException { File savedImage = new File(imageDir, "current/" + NNStorage.getImageFileName( EXPECTED_TXNS_FIRST_SEG)); - assertTrue("Should have saved image at " + savedImage, - savedImage.exists()); + assertTrue(savedImage.exists(), "Should have saved image at " + savedImage); } // restart cluster and verify file exists @@ -1265,7 +1265,7 @@ public void testCheckpointSignature() throws IOException { sig.clusterID = "somerandomcid"; try { sig.validateStorageInfo(nn.getFSImage()); // this should fail - assertTrue("This test is expected to fail.", false); + assertTrue(false, "This test is expected to fail."); } catch (Exception ignored) { } } finally { @@ -1379,10 +1379,8 @@ public void testMultipleSecondaryNamenodes() throws IOException { secondary2 = startSecondaryNameNode(snConf2); // make sure the two secondary namenodes are talking to 
correct namenodes. - assertEquals(secondary1.getNameNodeAddress().getPort(), - nn1RpcAddress.getPort()); - assertEquals(secondary2.getNameNodeAddress().getPort(), - nn2RpcAddress.getPort()); + assertEquals(secondary1.getNameNodeAddress().getPort(), nn1RpcAddress.getPort()); + assertEquals(secondary2.getNameNodeAddress().getPort(), nn2RpcAddress.getPort()); assertTrue(secondary1.getNameNodeAddress().getPort() != secondary2 .getNameNodeAddress().getPort()); @@ -1435,35 +1433,29 @@ public void testSecondaryImageDownload() throws IOException { File secondaryFsImageAfter = new File(secondaryCurrent, NNStorage.getImageFileName(expectedTxIdToDownload + 2)); - assertFalse("Secondary should start with empty current/ dir " + - "but " + secondaryFsImageBefore + " exists", - secondaryFsImageBefore.exists()); + assertFalse(secondaryFsImageBefore.exists(), "Secondary should start with empty current/ dir " + + "but " + secondaryFsImageBefore + " exists"); - assertTrue("Secondary should have loaded an image", - secondary.doCheckpoint()); - - assertTrue("Secondary should have downloaded original image", - secondaryFsImageBefore.exists()); - assertTrue("Secondary should have created a new image", - secondaryFsImageAfter.exists()); + assertTrue(secondary.doCheckpoint(), "Secondary should have loaded an image"); + + assertTrue(secondaryFsImageBefore.exists(), + "Secondary should have downloaded original image"); + assertTrue(secondaryFsImageAfter.exists(), "Secondary should have created a new image"); long fsimageLength = secondaryFsImageBefore.length(); - assertEquals("Image size should not have changed", - fsimageLength, - secondaryFsImageAfter.length()); + assertEquals(fsimageLength, secondaryFsImageAfter.length(), + "Image size should not have changed"); // change namespace fileSys.mkdirs(dir); - assertFalse("Another checkpoint should not have to re-load image", - secondary.doCheckpoint()); + assertFalse(secondary.doCheckpoint(), "Another checkpoint should not have to re-load image"); for (StorageDirectory sd : image.getStorage().dirIterable(NameNodeDirType.IMAGE)) { File imageFile = NNStorage.getImageFile(sd, NameNodeFile.IMAGE, expectedTxIdToDownload + 5); - assertTrue("Image size increased", - imageFile.length() > fsimageLength); + assertTrue(imageFile.length() > fsimageLength, "Image size increased"); } } finally { @@ -1479,7 +1471,8 @@ public void testSecondaryImageDownload() throws IOException { * Test NN restart if a failure happens in between creating the fsimage * MD5 file and renaming the fsimage. 
*/ - @Test(timeout=30000) + @Test + @Timeout(value = 30) public void testFailureBeforeRename () throws IOException { Configuration conf = new HdfsConfiguration(); FSDataOutputStream fos = null; @@ -1528,7 +1521,8 @@ public void testFailureBeforeRename () throws IOException { * Test that a fault while downloading edits does not prevent future * checkpointing */ - @Test(timeout = 30000) + @Test + @Timeout(value = 30) public void testEditFailureBeforeRename() throws IOException { Configuration conf = new HdfsConfiguration(); SecondaryNameNode secondary = null; @@ -1561,8 +1555,8 @@ public void testEditFailureBeforeRename() throws IOException { .dirIterable(NameNodeDirType.EDITS)) { File[] tmpEdits = sd.getCurrentDir().listFiles(tmpEditsFilter); assertTrue( - "Expected a single tmp edits file in directory " + sd.toString(), - tmpEdits.length == 1); + tmpEdits.length == 1, "Expected a single tmp edits file in directory " + sd.toString()); RandomAccessFile randFile = new RandomAccessFile(tmpEdits[0], "rw"); randFile.setLength(0); randFile.close(); @@ -1587,7 +1581,8 @@ public void testEditFailureBeforeRename() throws IOException { * Test that a fault while downloading edits the first time after the 2NN * starts up does not prevent future checkpointing. */ - @Test(timeout = 30000) + @Test + @Timeout(value = 30) public void testEditFailureOnFirstCheckpoint() throws IOException { Configuration conf = new HdfsConfiguration(); SecondaryNameNode secondary = null; @@ -1641,7 +1636,8 @@ public void testEditFailureOnFirstCheckpoint() throws IOException { * Test that the secondary namenode correctly deletes temporary edits * on startup. */ - @Test(timeout = 60000) + @Test + @Timeout(value = 60) public void testDeleteTemporaryEditsOnStartup() throws IOException { Configuration conf = new HdfsConfiguration(); SecondaryNameNode secondary = null; @@ -1675,8 +1671,8 @@ public void testDeleteTemporaryEditsOnStartup() throws IOException { .dirIterable(NameNodeDirType.EDITS)) { File[] tmpEdits = sd.getCurrentDir().listFiles(tmpEditsFilter); assertTrue( - "Expected a single tmp edits file in directory " + sd.toString(), - tmpEdits.length == 1); + tmpEdits.length == 1, "Expected a single tmp edits file in directory " + sd.toString()); } // Restart 2NN secondary.shutdown(); @@ -1686,8 +1682,8 @@ public void testDeleteTemporaryEditsOnStartup() throws IOException { .dirIterable(NameNodeDirType.EDITS)) { File[] tmpEdits = sd.getCurrentDir().listFiles(tmpEditsFilter); assertTrue( - "Did not expect a tmp edits file in directory " + sd.toString(), - tmpEdits.length == 0); + tmpEdits.length == 0, "Did not expect a tmp edits file in directory " + sd.toString()); } // Next checkpoint should succeed secondary.doCheckpoint(); @@ -2002,7 +1998,7 @@ public void testNamespaceVerifiedOnFileTransfer() throws IOException { fail("Storage info was not verified"); } catch (IOException ioe) { String msg = StringUtils.stringifyException(ioe); - assertTrue(msg, msg.contains("but the secondary expected")); + assertTrue(msg.contains("but the secondary expected"), msg); } try { @@ -2010,7 +2006,7 @@ public void testNamespaceVerifiedOnFileTransfer() throws IOException { fail("Storage info was not verified"); } catch (IOException ioe) { String msg = StringUtils.stringifyException(ioe); - assertTrue(msg, msg.contains("but the secondary expected")); + assertTrue(msg.contains("but the secondary expected"), msg); } try { @@ -2019,7 +2015,7 @@ public void testNamespaceVerifiedOnFileTransfer() throws IOException { fail("Storage info was not
verified"); } catch (IOException ioe) { String msg = StringUtils.stringifyException(ioe); - assertTrue(msg, msg.contains("but the secondary expected")); + assertTrue(msg.contains("but the secondary expected"), msg); } } finally { cleanup(cluster); @@ -2162,7 +2158,8 @@ public void testCheckpointWithSeparateDirsAfterNameFails() throws Exception { /** * Test that the 2NN triggers a checkpoint after the configurable interval */ - @Test(timeout=30000) + @Test + @Timeout(value = 30) public void testCheckpointTriggerOnTxnCount() throws Exception { MiniDFSCluster cluster = null; SecondaryNameNode secondary = null; @@ -2280,8 +2277,7 @@ public void testSecondaryPurgesEditLogs() throws IOException { for (File checkpointDir : checkpointDirs) { List editsFiles = FileJournalManager.matchEditLogs( checkpointDir); - assertEquals("Edit log files were not purged from 2NN", 1, - editsFiles.size()); + assertEquals(1, editsFiles.size(), "Edit log files were not purged from 2NN"); } } finally { @@ -2392,18 +2388,15 @@ public void testCommandLineParsing() throws ParseException { assertNull(opts.getCommand()); opts.parse("-checkpoint"); - assertEquals(SecondaryNameNode.CommandLineOpts.Command.CHECKPOINT, - opts.getCommand()); + assertEquals(SecondaryNameNode.CommandLineOpts.Command.CHECKPOINT, opts.getCommand()); assertFalse(opts.shouldForceCheckpoint()); opts.parse("-checkpoint", "force"); - assertEquals(SecondaryNameNode.CommandLineOpts.Command.CHECKPOINT, - opts.getCommand()); + assertEquals(SecondaryNameNode.CommandLineOpts.Command.CHECKPOINT, opts.getCommand()); assertTrue(opts.shouldForceCheckpoint()); opts.parse("-geteditsize"); - assertEquals(SecondaryNameNode.CommandLineOpts.Command.GETEDITSIZE, - opts.getCommand()); + assertEquals(SecondaryNameNode.CommandLineOpts.Command.GETEDITSIZE, opts.getCommand()); opts.parse("-format"); assertTrue(opts.shouldFormat()); @@ -2444,14 +2437,14 @@ public void testLegacyOivImage() throws Exception { // Checkpoint once secondary.doCheckpoint(); String files1[] = tmpDir.list(); - assertEquals("Only one file is expected", 1, files1.length); + assertEquals(1, files1.length, "Only one file is expected"); // Perform more checkpointngs and check whether retention management // is working. secondary.doCheckpoint(); secondary.doCheckpoint(); String files2[] = tmpDir.list(); - assertEquals("Two files are expected", 2, files2.length); + assertEquals(2, files2.length, "Two files are expected"); // Verify that the first file is deleted. for (String fName : files2) { @@ -2464,7 +2457,8 @@ public void testLegacyOivImage() throws Exception { } } - @Test(timeout = 300000) + @Test + @Timeout(value = 300) public void testActiveRejectSmallerTxidDeltaImage() throws Exception { MiniDFSCluster cluster = null; Configuration conf = new HdfsConfiguration(); @@ -2487,8 +2481,7 @@ public void testActiveRejectSmallerTxidDeltaImage() throws Exception { secondary = startSecondaryNameNode(conf); FileSystem fs = cluster.getFileSystem(); - assertEquals(0, active.getNamesystem().getFSImage() - .getMostRecentCheckpointTxId()); + assertEquals(0, active.getNamesystem().getFSImage().getMostRecentCheckpointTxId()); // create 5 dir. for (int i = 0; i < 5; i++) { @@ -2499,8 +2492,7 @@ public void testActiveRejectSmallerTxidDeltaImage() throws Exception { secondary.doCheckpoint(); // at this point, the txid delta is smaller than threshold 10. // active does not accept this image. 
- assertEquals(0, active.getNamesystem().getFSImage() - .getMostRecentCheckpointTxId()); + assertEquals(0, active.getNamesystem().getFSImage().getMostRecentCheckpointTxId()); // create another 10 dir. for (int i = 0; i < 10; i++) { @@ -2510,8 +2502,7 @@ public void testActiveRejectSmallerTxidDeltaImage() throws Exception { // Checkpoint 2nd secondary.doCheckpoint(); // here the delta is large enough and active accepts this image. - assertEquals(21, active.getNamesystem().getFSImage() - .getMostRecentCheckpointTxId()); + assertEquals(21, active.getNamesystem().getFSImage().getMostRecentCheckpointTxId()); } finally { cleanup(secondary); cleanup(cluster); @@ -2549,8 +2540,7 @@ public void testActiveImageWithTimeDeltaRelaxation() throws Exception { secondary = startSecondaryNameNode(conf); FileSystem fs = cluster.getFileSystem(); - assertEquals(0, active.getNamesystem().getFSImage() - .getMostRecentCheckpointTxId()); + assertEquals(0, active.getNamesystem().getFSImage().getMostRecentCheckpointTxId()); // create 5 dir. for (int i = 0; i < 5; i++) { @@ -2562,8 +2552,7 @@ public void testActiveImageWithTimeDeltaRelaxation() throws Exception { // at this point, despite this is a small delta change, w.r.t both // txid and time delta, due to we set relaxation to 0, this image // still gets accepted - assertEquals(9, active.getNamesystem().getFSImage() - .getMostRecentCheckpointTxId()); + assertEquals(9, active.getNamesystem().getFSImage().getMostRecentCheckpointTxId()); } finally { cleanup(secondary); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClientNameNodeAddress.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClientNameNodeAddress.java index 829680e3bb91f..f6f9b76cebb5f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClientNameNodeAddress.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClientNameNodeAddress.java @@ -20,36 +20,31 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.HdfsConfiguration; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.*; import static org.apache.hadoop.hdfs.DFSConfigKeys.*; -import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.*; - +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertNull; /** * Test that {@link NameNodeUtils#getClientNamenodeAddress} correctly * computes the client address for WebHDFS redirects for different * combinations of HA, federated and single NN setups. 
*/ +@Timeout(300) public class TestClientNameNodeAddress { public static final Logger LOG = LoggerFactory.getLogger( TestClientNameNodeAddress.class); - @Rule - public Timeout globalTimeout = new Timeout(300000); - @Test public void testSimpleConfig() { final Configuration conf = new HdfsConfiguration(); conf.set(FS_DEFAULT_NAME_KEY, "hdfs://host1:100"); - assertThat(NameNodeUtils.getClientNamenodeAddress(conf, null), - is("host1:100")); + assertThat(NameNodeUtils.getClientNamenodeAddress(conf, null)).isEqualTo("host1:100"); } @Test @@ -81,12 +76,10 @@ public void testFederationWithHa() { conf.set(DFS_HA_NAMENODES_KEY_PREFIX + ".ns2", "nn1,nn2"); // The current namenode belongs to ns1 and ns1 is the default nameservice. - assertThat(NameNodeUtils.getClientNamenodeAddress(conf, "ns1"), - is("ns1")); + assertThat(NameNodeUtils.getClientNamenodeAddress(conf, "ns1")).isEqualTo("ns1"); // The current namenode belongs to ns2 and ns1 is the default nameservice. - assertThat(NameNodeUtils.getClientNamenodeAddress(conf, "ns2"), - is("ns2")); + assertThat(NameNodeUtils.getClientNamenodeAddress(conf, "ns2")).isEqualTo("ns2"); } @Test @@ -96,9 +89,7 @@ public void testFederationWithoutHa() { conf.set(DFS_NAMESERVICES, "ns1,ns2"); conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY + ".ns1", "host1:100"); conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY + ".ns2", "host2:200"); - assertThat(NameNodeUtils.getClientNamenodeAddress(conf, "ns1"), - is("host1:100")); - assertThat(NameNodeUtils.getClientNamenodeAddress(conf, "ns2"), - is("host2:200")); + assertThat(NameNodeUtils.getClientNamenodeAddress(conf, "ns1")).isEqualTo("host1:100"); + assertThat(NameNodeUtils.getClientNamenodeAddress(conf, "ns2")).isEqualTo("host2:200"); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCorrectnessOfQuotaAfterRenameOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCorrectnessOfQuotaAfterRenameOp.java index f51bbfa0b61ac..64df2e6f1147f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCorrectnessOfQuotaAfterRenameOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCorrectnessOfQuotaAfterRenameOp.java @@ -30,8 +30,8 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.PathUtils; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; import java.io.IOException; @@ -44,7 +44,7 @@ public class TestCorrectnessOfQuotaAfterRenameOp { private static MiniDFSCluster cluster; private static DistributedFileSystem dfs; - @BeforeClass + @BeforeAll public static void setUp() throws IOException { HdfsConfiguration conf = new HdfsConfiguration(); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java index 8b9a9d012e29b..c42ec40758c13 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java @@ -17,9 +17,12 @@ */ package 
org.apache.hadoop.hdfs.server.namenode; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyString; @@ -51,11 +54,11 @@ import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils.LogCapturer; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; @@ -69,7 +72,7 @@ public class TestDiskspaceQuotaUpdate { private static Configuration conf; private static MiniDFSCluster cluster; - @BeforeClass + @BeforeAll public static void setUp() throws Exception { conf = new Configuration(); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE); @@ -78,7 +81,7 @@ public static void setUp() throws Exception { cluster.waitActive(); } - @Before + @BeforeEach public void resetCluster() throws Exception { if (!cluster.isClusterUp()) { // Previous test seems to have left cluster in a bad state; @@ -90,7 +93,7 @@ public void resetCluster() throws Exception { } } - @AfterClass + @AfterAll public static void tearDown() throws Exception { if (cluster != null) { cluster.shutdown(); @@ -113,7 +116,8 @@ private DistributedFileSystem getDFS() throws IOException { /** * Test if the quota can be correctly updated for create file */ - @Test (timeout=60000) + @Test + @Timeout(value = 60) public void testQuotaUpdateWithFileCreate() throws Exception { final Path foo = new Path(getParent(GenericTestUtils.getMethodName()), "foo"); @@ -135,7 +139,8 @@ public void testQuotaUpdateWithFileCreate() throws Exception { /** * Test if the quota can be correctly updated for append */ - @Test (timeout=60000) + @Test + @Timeout(value = 60) public void testUpdateQuotaForAppend() throws Exception { final Path foo = new Path(getParent(GenericTestUtils.getMethodName()), "foo"); @@ -190,7 +195,8 @@ public void testUpdateQuotaForAppend() throws Exception { * Test if the quota can be correctly updated when file length is updated * through fsync */ - @Test (timeout=60000) + @Test + @Timeout(value = 60) public void testUpdateQuotaForFSync() throws Exception { final Path foo = new Path(getParent(GenericTestUtils.getMethodName()), "foo"); @@ -234,7 +240,8 @@ public void testUpdateQuotaForFSync() throws Exception { /** * Test append over storage quota does not mark file as UC or create lease */ - @Test (timeout=60000) + @Test + @Timeout(value = 60) public void testAppendOverStorageQuota() throws Exception { final Path dir = getParent(GenericTestUtils.getMethodName()); final Path file = new Path(dir, "file"); @@ -251,7 +258,7 @@ public void testAppendOverStorageQuota() throws Exception { 
.getSpaceConsumed().getStorageSpace(); try { DFSTestUtil.appendFile(getDFS(), file, BLOCKSIZE); - Assert.fail("append didn't fail"); + fail("append didn't fail"); } catch (DSQuotaExceededException e) { // ignore } @@ -259,9 +266,9 @@ public void testAppendOverStorageQuota() throws Exception { LeaseManager lm = cluster.getNamesystem().getLeaseManager(); // check that the file exists, isn't UC, and has no dangling lease INodeFile inode = getFSDirectory().getINode(file.toString()).asFile(); - Assert.assertNotNull(inode); - Assert.assertFalse("should not be UC", inode.isUnderConstruction()); - Assert.assertNull("should not have a lease", lm.getLease(inode)); + assertNotNull(inode); + assertFalse(inode.isUnderConstruction(), "should not be UC"); + assertNull(lm.getLease(inode), "should not have a lease"); // make sure the quota usage is unchanged final long newSpaceUsed = dirNode.getDirectoryWithQuotaFeature() .getSpaceConsumed().getStorageSpace(); @@ -275,7 +282,8 @@ public void testAppendOverStorageQuota() throws Exception { * Test append over a specific type of storage quota does not mark file as * UC or create a lease */ - @Test (timeout=60000) + @Test + @Timeout(value = 60) public void testAppendOverTypeQuota() throws Exception { final Path dir = getParent(GenericTestUtils.getMethodName()); final Path file = new Path(dir, "file"); @@ -294,7 +302,7 @@ public void testAppendOverTypeQuota() throws Exception { .getSpaceConsumed().getStorageSpace(); try { DFSTestUtil.appendFile(getDFS(), file, BLOCKSIZE); - Assert.fail("append didn't fail"); + fail("append didn't fail"); } catch (QuotaByStorageTypeExceededException e) { //ignore } @@ -302,9 +310,9 @@ public void testAppendOverTypeQuota() throws Exception { // check that the file exists, isn't UC, and has no dangling lease LeaseManager lm = cluster.getNamesystem().getLeaseManager(); INodeFile inode = getFSDirectory().getINode(file.toString()).asFile(); - Assert.assertNotNull(inode); - Assert.assertFalse("should not be UC", inode.isUnderConstruction()); - Assert.assertNull("should not have a lease", lm.getLease(inode)); + assertNotNull(inode); + assertFalse(inode.isUnderConstruction(), "should not be UC"); + assertNull(lm.getLease(inode), "should not have a lease"); // make sure the quota usage is unchanged final long newSpaceUsed = dirNode.getDirectoryWithQuotaFeature() .getSpaceConsumed().getStorageSpace(); @@ -317,7 +325,8 @@ public void testAppendOverTypeQuota() throws Exception { /** * Test truncate over quota does not mark file as UC or create a lease */ - @Test (timeout=60000) + @Test + @Timeout(value = 60) public void testTruncateOverQuota() throws Exception { final Path dir = getParent(GenericTestUtils.getMethodName()); final Path file = new Path(dir, "file"); @@ -334,7 +343,7 @@ public void testTruncateOverQuota() throws Exception { .getSpaceConsumed().getStorageSpace(); try { getDFS().truncate(file, BLOCKSIZE / 2 - 1); - Assert.fail("truncate didn't fail"); + fail("truncate didn't fail"); } catch (RemoteException e) { assertTrue(e.getClassName().contains("DSQuotaExceededException")); } @@ -342,9 +351,9 @@ public void testTruncateOverQuota() throws Exception { // check that the file exists, isn't UC, and has no dangling lease LeaseManager lm = cluster.getNamesystem().getLeaseManager(); INodeFile inode = getFSDirectory().getINode(file.toString()).asFile(); - Assert.assertNotNull(inode); - Assert.assertFalse("should not be UC", inode.isUnderConstruction()); - Assert.assertNull("should not have a lease", lm.getLease(inode)); + 
assertNotNull(inode); + assertFalse(inode.isUnderConstruction(), "should not be UC"); + assertNull(lm.getLease(inode), "should not have a lease"); // make sure the quota usage is unchanged final long newSpaceUsed = dirNode.getDirectoryWithQuotaFeature() .getSpaceConsumed().getStorageSpace(); @@ -429,7 +438,8 @@ private void scanDirsWithQuota(INodeDirectory dir, * and COMPLETE block steps, even if the replication factor is * changed during this time. */ - @Test (timeout=60000) + @Test + @Timeout(value = 60) public void testQuotaIssuesWhileCommitting() throws Exception { // We want a one-DN cluster so that we can force a lack of // commit by only instrumenting a single DN; we kill the other 3 @@ -529,19 +539,22 @@ private void testQuotaIssuesBeforeCommitting(short initialReplication, assertFalse(logs.getOutput().contains(logStmt)); } - @Test (timeout=60000) + @Test + @Timeout(value = 60) public void testCachedComputedSizesAgreeBeforeCommitting() throws Exception { // Don't actually change replication; just check that the sizes // agree before the commit period testQuotaIssuesBeforeCommitting((short)1, (short)1); } - @Test (timeout=60000) + @Test + @Timeout(value = 60) public void testDecreaseReplicationBeforeCommitting() throws Exception { testQuotaIssuesBeforeCommitting((short)4, (short)1); } - @Test (timeout=60000) + @Test + @Timeout(value = 60) public void testIncreaseReplicationBeforeCommitting() throws Exception { testQuotaIssuesBeforeCommitting((short)1, (short)4); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEncryptionZoneManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEncryptionZoneManager.java index fecbbfa978619..dd7a3c172d21c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEncryptionZoneManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEncryptionZoneManager.java @@ -18,7 +18,7 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -30,8 +30,8 @@ import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.protocol.EncryptionZone; import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; /** * Test class for EncryptionZoneManager methods. 
Added tests for @@ -48,7 +48,7 @@ public class TestEncryptionZoneManager { private PermissionStatus defaultPermission; private EncryptionZoneManager ezManager; - @Before + @BeforeEach public void setup() { this.mockedDir = mock(FSDirectory.class); this.mockedINodesInPath = mock(INodesInPath.class); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirAttrOp.java index b1c061e8c1bea..212d4e312909d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirAttrOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirAttrOp.java @@ -24,14 +24,15 @@ import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotManager; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.mockito.Mockito; import java.io.FileNotFoundException; import java.util.Random; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.Mockito.when; @@ -97,56 +98,57 @@ private boolean unprotectedSetAttributes(short currPerm, short newPerm, @Test public void testUnprotectedSetPermissions() throws Exception { - assertTrue("setPermissions return true for updated permissions", - unprotectedSetAttributes((short) 0777, (short) 0)); - assertFalse("setPermissions should return false for same permissions", - unprotectedSetAttributes((short) 0777, (short) 0777)); + assertTrue(unprotectedSetAttributes((short) 0777, (short) 0), + "setPermissions return true for updated permissions"); + assertFalse(unprotectedSetAttributes((short) 0777, (short) 0777), + "setPermissions should return false for same permissions"); } @Test public void testUnprotectedSetOwner() throws Exception { - assertTrue("SetOwner should return true for a new user", - unprotectedSetAttributes((short) 0777, (short) 0777, "user1", - "user2", true)); - assertFalse("SetOwner should return false for same user", - unprotectedSetAttributes((short) 0777, (short) 0777, "user1", - "user1", true)); + assertTrue(unprotectedSetAttributes((short) 0777, (short) 0777, "user1", "user2", + true), + "SetOwner should return true for a new user"); + assertFalse(unprotectedSetAttributes((short) 0777, (short) 0777, "user1", "user1", + true), + "SetOwner should return false for same user"); } @Test public void testUnprotectedSetTimes() throws Exception { // atime < access time + precision - assertFalse("SetTimes should not update access time " - + "because it's within the last precision interval", - unprotectedSetTimes(100, 0, 1000, -1, false)); + assertFalse(unprotectedSetTimes(100, 0, 1000, -1, false), + "SetTimes should not update access time " + + "because it's within the last precision interval"); // atime = access time + precision - assertFalse("SetTimes should not update access time " - + "because it's within the last precision interval", - unprotectedSetTimes(1000, 0, 1000, -1, false)); + assertFalse(unprotectedSetTimes(1000, 0, 1000, -1, false), + "SetTimes should not update 
access time " + + "because it's within the last precision interval"); // atime > access time + precision - assertTrue("SetTimes should update access time", - unprotectedSetTimes(1011, 10, 1000, -1, false)); + assertTrue(unprotectedSetTimes(1011, 10, 1000, -1, false), + "SetTimes should update access time"); // atime < access time + precision, but force is set - assertTrue("SetTimes should update access time", - unprotectedSetTimes(100, 0, 1000, -1, true)); + assertTrue(unprotectedSetTimes(100, 0, 1000, -1, true), + "SetTimes should update access time"); // atime < access time + precision, but mtime is set - assertTrue("SetTimes should update access time", - unprotectedSetTimes(100, 0, 1000, 1, false)); + assertTrue(unprotectedSetTimes(100, 0, 1000, 1, false), + "SetTimes should update access time"); } - @Test(expected = FileNotFoundException.class) + @Test public void testUnprotectedSetTimesFNFE() throws FileNotFoundException { - FSDirectory fsd = Mockito.mock(FSDirectory.class); - INodesInPath iip = Mockito.mock(INodesInPath.class); - - when(fsd.hasWriteLock()).thenReturn(Boolean.TRUE); - when(iip.getLastINode()).thenReturn(null); + assertThrows(FileNotFoundException.class, () -> { + FSDirectory fsd = Mockito.mock(FSDirectory.class); + INodesInPath iip = Mockito.mock(INodesInPath.class); + when(fsd.hasWriteLock()).thenReturn(Boolean.TRUE); + when(iip.getLastINode()).thenReturn(null); + FSDirAttrOp.unprotectedSetTimes(fsd, iip, 0, 0, false); + }); - FSDirAttrOp.unprotectedSetTimes(fsd, iip, 0, 0, false); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirEncryptionZoneOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirEncryptionZoneOp.java index 2fa6a33f0b232..05672bf4a7d12 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirEncryptionZoneOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirEncryptionZoneOp.java @@ -23,7 +23,7 @@ import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; -import org.junit.Test; +import org.junit.jupiter.api.Test; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doThrow; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirWriteFileOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirWriteFileOp.java index e3cfc01395f9b..57e2ccd673210 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirWriteFileOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirWriteFileOp.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import static org.junit.Assert.assertNull; +import static org.junit.jupiter.api.Assertions.assertNull; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyByte; import static org.mockito.ArgumentMatchers.anyInt; @@ -38,7 +38,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.ValidateAddBlockResult; import org.apache.hadoop.net.Node; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.mockito.ArgumentCaptor; public class TestFSDirWriteFileOp { 
@@ -72,8 +72,8 @@ public void testIgnoreClientLocality() throws IOException { verifyNoMoreInteractions(bmMock); assertNull( - "Source node was assigned a value. Expected 'null' value because " - + "chooseTarget was flagged to ignore source node locality", - nodeCaptor.getValue()); + nodeCaptor.getValue(), "Source node was assigned a value. Expected 'null' value because " + + "chooseTarget was flagged to ignore source node locality"); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java index 556a26dc0338a..2c1473adaab93 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java @@ -42,17 +42,18 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Lists; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; /** * Test {@link FSDirectory}, the in-memory namespace tree.
@@ -88,7 +89,7 @@ public class TestFSDirectory { private static final ImmutableList generatedXAttrs = ImmutableList.copyOf(generateXAttrs(numGeneratedXAttrs)); - @Before + @BeforeEach public void setUp() throws Exception { conf = new Configuration(); conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY, 2); @@ -109,7 +110,7 @@ public void setUp() throws Exception { hdfs.mkdirs(sub2); } - @After + @AfterEach public void tearDown() throws Exception { if (cluster != null) { cluster.shutdown(); @@ -134,9 +135,8 @@ public void testDumpTree() throws Exception { for(; (line = in.readLine()) != null; ) { line = line.trim(); if (!line.isEmpty() && !line.contains("snapshot")) { - assertTrue("line=" + line, - line.startsWith(INodeDirectory.DUMPTREE_LAST_ITEM) - || line.startsWith(INodeDirectory.DUMPTREE_EXCEPT_LAST_ITEM) + assertTrue(line.startsWith(INodeDirectory.DUMPTREE_LAST_ITEM) + || line.startsWith(INodeDirectory.DUMPTREE_EXCEPT_LAST_ITEM), "line=" + line ); checkClassName(line); } @@ -234,12 +234,11 @@ public void testINodeXAttrsLimit() throws Exception { */ private static void verifyXAttrsPresent(List newXAttrs, final int num) { - assertEquals("Unexpected number of XAttrs after multiset", num, - newXAttrs.size()); - for (int i=0; i generateXAttrs(final int numXAttrs) { /** * Test setting and removing multiple xattrs via single operations */ - @Test(timeout=300000) + @Test + @Timeout(value = 300) public void testXAttrMultiSetRemove() throws Exception { List existingXAttrs = Lists.newArrayListWithCapacity(0); @@ -307,14 +307,15 @@ public void testXAttrMultiSetRemove() throws Exception { List newXAttrs = FSDirXAttrOp.filterINodeXAttrs(existingXAttrs, toRemove, removedXAttrs); - assertEquals("Unexpected number of removed XAttrs", - expectedNumToRemove, removedXAttrs.size()); + assertEquals(expectedNumToRemove, removedXAttrs.size(), + "Unexpected number of removed XAttrs"); verifyXAttrsPresent(newXAttrs, numExpectedXAttrs); existingXAttrs = newXAttrs; } } - @Test(timeout=300000) + @Test + @Timeout(value = 300) public void testXAttrMultiAddRemoveErrors() throws Exception { // Test that the same XAttr can not be multiset twice @@ -356,9 +357,9 @@ public void testXAttrMultiAddRemoveErrors() throws Exception { List newXAttrs = FSDirXAttrOp.setINodeXAttrs(fsdir, existingXAttrs, toAdd, EnumSet.of( XAttrSetFlag.CREATE)); - assertEquals("Unexpected toAdd size", 2, toAdd.size()); + assertEquals(2, toAdd.size(), "Unexpected toAdd size"); for (XAttr x : toAdd) { - assertTrue("Did not find added XAttr " + x, newXAttrs.contains(x)); + assertTrue(newXAttrs.contains(x), "Did not find added XAttr " + x); } existingXAttrs = newXAttrs; @@ -374,10 +375,10 @@ public void testXAttrMultiAddRemoveErrors() throws Exception { } newXAttrs = FSDirXAttrOp.setINodeXAttrs(fsdir, existingXAttrs, toAdd, EnumSet.of(XAttrSetFlag.REPLACE)); - assertEquals("Unexpected number of new XAttrs", 3, newXAttrs.size()); - for (int i=0; i<3; i++) { - assertArrayEquals("Unexpected XAttr value", - new byte[] {(byte)(i*2)}, newXAttrs.get(i).getValue()); + assertEquals(3, newXAttrs.size(), "Unexpected number of new XAttrs"); + for (int i = 0; i < 3; i++) { + assertArrayEquals(new byte[]{(byte) (i * 2)}, newXAttrs.get(i).getValue(), + "Unexpected XAttr value"); } existingXAttrs = newXAttrs; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java index 
cdc067aeb2880..ab245d7fd12aa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java @@ -18,11 +18,11 @@ package org.apache.hadoop.hdfs.server.namenode; import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.File; import java.io.DataOutput; @@ -81,11 +81,11 @@ import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.test.LambdaTestUtils; import org.apache.hadoop.util.Time; -import org.junit.Assert; -import org.junit.Assume; -import org.junit.Test; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; -import static org.junit.Assert.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assumptions.assumeTrue; public class TestFSImage { @@ -112,7 +112,7 @@ public void testCompression() throws IOException { @Test public void testNativeCompression() throws IOException { - Assume.assumeTrue(NativeCodeLoader.isNativeCodeLoaded()); + assumeTrue(NativeCodeLoader.isNativeCodeLoaded()); Configuration conf = new Configuration(); conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true); setCompressCodec(conf, "org.apache.hadoop.io.compress.Lz4Codec"); @@ -167,7 +167,7 @@ private void testPersistHelper(Configuration conf) throws IOException { assertEquals(BlockUCState.UNDER_CONSTRUCTION, blks[0].getBlockUCState()); // check lease manager Lease lease = fsn.leaseManager.getLease(file2Node); - Assert.assertNotNull(lease); + assertNotNull(lease); } finally { if (cluster != null) { cluster.shutdown(); @@ -230,11 +230,9 @@ private void testSaveAndLoadStripedINodeFile(FSNamesystem fsn, Configuration con .loadINodeWithLocalName(false, in, false); } - assertEquals(id, fileByLoaded.getId() ); - assertArrayEquals(isUC ? path.getBytes() : name, - fileByLoaded.getLocalName().getBytes()); - assertEquals(permissionStatus.getUserName(), - fileByLoaded.getPermissionStatus().getUserName()); + assertEquals(id, fileByLoaded.getId()); + assertArrayEquals(isUC ? 
path.getBytes() : name, fileByLoaded.getLocalName().getBytes()); + assertEquals(permissionStatus.getUserName(), fileByLoaded.getPermissionStatus().getUserName()); assertEquals(permissionStatus.getGroupName(), fileByLoaded.getPermissionStatus().getGroupName()); assertEquals(permissionStatus.getPermission(), @@ -248,8 +246,7 @@ private void testSaveAndLoadStripedINodeFile(FSNamesystem fsn, Configuration con assertEquals(file.getFileReplication(), fileByLoaded.getFileReplication()); if (isUC) { - assertEquals(client, - fileByLoaded.getFileUnderConstructionFeature().getClientName()); + assertEquals(client, fileByLoaded.getFileUnderConstructionFeature().getClientName()); assertEquals(clientMachine, fileByLoaded.getFileUnderConstructionFeature().getClientMachine()); } @@ -381,7 +378,8 @@ public void testDigest() throws IOException { /** * Ensure mtime and atime can be loaded from fsimage. */ - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void testLoadMtimeAtime() throws Exception { Configuration conf = new Configuration(); MiniDFSCluster cluster = null; @@ -428,7 +426,8 @@ public void testLoadMtimeAtime() throws Exception { /** * Ensure ctime is set during namenode formatting. */ - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void testCtime() throws Exception { Configuration conf = new Configuration(); MiniDFSCluster cluster = null; @@ -480,8 +479,8 @@ public void testZeroBlockSize() throws Exception { try { FileSystem fs = cluster.getFileSystem(); Path testPath = new Path("/tmp/zeroBlockFile"); - assertTrue("File /tmp/zeroBlockFile doesn't exist ", fs.exists(testPath)); - assertTrue("Name node didn't come up", cluster.isNameNodeUp(0)); + assertTrue(fs.exists(testPath), "File /tmp/zeroBlockFile doesn't exist "); + assertTrue(cluster.isNameNodeUp(0), "Name node didn't come up"); } finally { cluster.shutdown(); //Clean up @@ -492,7 +491,8 @@ public void testZeroBlockSize() throws Exception { /** * Ensure that FSImage supports BlockGroup. 
*/ - @Test(timeout = 60000) + @Test + @Timeout(value = 60) public void testSupportBlockGroup() throws Exception { final short GROUP_SIZE = (short) (testECPolicy.getNumDataUnits() + testECPolicy.getNumParityUnits()); @@ -542,12 +542,10 @@ public void testSupportBlockGroup() throws Exception { BlockInfo[] blks = inode.getBlocks(); assertEquals(1, blks.length); assertTrue(blks[0].isStriped()); - assertEquals(testECPolicy.getId(), - fs.getErasureCodingPolicy(file_10_4).getId()); + assertEquals(testECPolicy.getId(), fs.getErasureCodingPolicy(file_10_4).getId()); assertEquals(testECPolicy.getId(), ((BlockInfoStriped)blks[0]).getErasureCodingPolicy().getId()); - assertEquals(testECPolicy.getNumDataUnits(), - ((BlockInfoStriped) blks[0]).getDataBlockNum()); + assertEquals(testECPolicy.getNumDataUnits(), ((BlockInfoStriped) blks[0]).getDataBlockNum()); assertEquals(testECPolicy.getNumParityUnits(), ((BlockInfoStriped) blks[0]).getParityBlockNum()); byte[] content = DFSTestUtil.readFileAsBytes(fs, file_10_4); @@ -557,16 +555,14 @@ public void testSupportBlockGroup() throws Exception { // check the information of file_3_2 inode = fsn.dir.getINode(file_3_2.toString()).asFile(); assertTrue(inode.isStriped()); - assertEquals(SystemErasureCodingPolicies.getByID( - SystemErasureCodingPolicies.RS_3_2_POLICY_ID).getId(), + assertEquals( + SystemErasureCodingPolicies.getByID(SystemErasureCodingPolicies.RS_3_2_POLICY_ID).getId(), inode.getErasureCodingPolicyID()); blks = inode.getBlocks(); assertEquals(1, blks.length); assertTrue(blks[0].isStriped()); - assertEquals(ec32Policy.getId(), - fs.getErasureCodingPolicy(file_3_2).getId()); - assertEquals(ec32Policy.getNumDataUnits(), - ((BlockInfoStriped) blks[0]).getDataBlockNum()); + assertEquals(ec32Policy.getId(), fs.getErasureCodingPolicy(file_3_2).getId()); + assertEquals(ec32Policy.getNumDataUnits(), ((BlockInfoStriped) blks[0]).getDataBlockNum()); assertEquals(ec32Policy.getNumParityUnits(), ((BlockInfoStriped) blks[0]).getParityBlockNum()); content = DFSTestUtil.readFileAsBytes(fs, file_3_2); @@ -818,14 +814,14 @@ public void testSaveAndLoadFileUnderReplicationPolicyDir() assertTrue(fs.exists(replicaFile2)); // check directories - assertEquals("Directory should have default EC policy.", - defaultEcPolicy, fs.getErasureCodingPolicy(ecDir)); - assertEquals("Directory should hide replication EC policy.", - null, fs.getErasureCodingPolicy(replicaDir)); + assertEquals(defaultEcPolicy, fs.getErasureCodingPolicy(ecDir), + "Directory should have default EC policy."); + assertEquals(null, fs.getErasureCodingPolicy(replicaDir), + "Directory should hide replication EC policy."); // check file1 - assertEquals("File should not have EC policy.", null, - fs.getErasureCodingPolicy(replicaFile1)); + assertEquals(null, fs.getErasureCodingPolicy(replicaFile1), + "File should not have EC policy."); // check internals of file2 INodeFile file2Node = fsn.dir.getINode4Write(replicaFile2.toString()).asFile(); @@ -834,13 +830,12 @@ public void testSaveAndLoadFileUnderReplicationPolicyDir() BlockInfo[] blks = file2Node.getBlocks(); assertEquals(1, blks.length); assertEquals(BlockUCState.UNDER_CONSTRUCTION, blks[0].getBlockUCState()); - assertEquals("File should return expected replication factor.", - 2, blks[0].getReplication()); - assertEquals("File should not have EC policy.", null, - fs.getErasureCodingPolicy(replicaFile2)); + assertEquals(2, blks[0].getReplication(), "File should return expected replication factor."); + assertEquals(null, fs.getErasureCodingPolicy(replicaFile2), 
+ "File should not have EC policy."); // check lease manager Lease lease = fsn.leaseManager.getLease(file2Node); - Assert.assertNotNull(lease); + assertNotNull(lease); } finally { if (cluster != null) { cluster.shutdown(); @@ -870,9 +865,9 @@ public void testSaveAndLoadErasureCodingPolicies() throws IOException{ cluster.restartNameNodes(); cluster.waitActive(); - assertEquals("Erasure coding policy number should match", - SystemErasureCodingPolicies.getPolicies().size(), - ErasureCodingPolicyManager.getInstance().getPolicies().length); + assertEquals(SystemErasureCodingPolicies.getPolicies().size(), + ErasureCodingPolicyManager.getInstance().getPolicies().length, + "Erasure coding policy number should match"); // Add new erasure coding policy ECSchema newSchema = new ECSchema("rs", 5, 4); @@ -893,17 +888,14 @@ public void testSaveAndLoadErasureCodingPolicies() throws IOException{ cluster.restartNameNodes(); cluster.waitActive(); - assertEquals("Erasure coding policy number should match", - SystemErasureCodingPolicies.getPolicies().size() + 1, - ErasureCodingPolicyManager.getInstance().getPolicies().length); + assertEquals(SystemErasureCodingPolicies.getPolicies().size() + 1, + ErasureCodingPolicyManager.getInstance().getPolicies().length, + "Erasure coding policy number should match"); ErasureCodingPolicy ecPolicy = ErasureCodingPolicyManager.getInstance().getByID(newPolicy.getId()); - assertEquals("Newly added erasure coding policy is not found", - newPolicy, ecPolicy); - assertEquals( - "Newly added erasure coding policy should be of disabled state", - ErasureCodingPolicyState.DISABLED, - DFSTestUtil.getECPolicyState(ecPolicy)); + assertEquals(newPolicy, ecPolicy, "Newly added erasure coding policy is not found"); + assertEquals(ErasureCodingPolicyState.DISABLED, DFSTestUtil.getECPolicyState(ecPolicy), + "Newly added erasure coding policy should be of disabled state"); // Test enable/disable/remove user customized erasure coding policy testChangeErasureCodingPolicyState(cluster, blockSize, newPolicy, false); @@ -942,13 +934,11 @@ private void testChangeErasureCodingPolicyState(MiniDFSCluster cluster, cluster.waitActive(); ErasureCodingPolicy ecPolicy = ErasureCodingPolicyManager.getInstance().getByID(targetPolicy.getId()); - assertEquals("The erasure coding policy is not found", - targetPolicy, ecPolicy); - assertEquals("The erasure coding policy should be of enabled state", - ErasureCodingPolicyState.ENABLED, - DFSTestUtil.getECPolicyState(ecPolicy)); - assertTrue("Policy should be in disabled state in FSImage!", - isPolicyEnabledInFsImage(targetPolicy)); + assertEquals(targetPolicy, ecPolicy, "The erasure coding policy is not found"); + assertEquals(ErasureCodingPolicyState.ENABLED, DFSTestUtil.getECPolicyState(ecPolicy), + "The erasure coding policy should be of enabled state"); + assertTrue(isPolicyEnabledInFsImage(targetPolicy), + "Policy should be in disabled state in FSImage!"); // Read file regardless of the erasure coding policy state DFSTestUtil.readFileAsBytes(fs, filePath); @@ -964,19 +954,18 @@ private void testChangeErasureCodingPolicyState(MiniDFSCluster cluster, cluster.waitActive(); ecPolicy = ErasureCodingPolicyManager.getInstance().getByID(targetPolicy.getId()); - assertEquals("The erasure coding policy is not found", - targetPolicy, ecPolicy); + assertEquals(targetPolicy, ecPolicy, "The erasure coding policy is not found"); ErasureCodingPolicyState ecPolicyState = DFSTestUtil.getECPolicyState(ecPolicy); if (isDefault) { - assertEquals("The erasure coding policy should 
be of " + - "enabled state", ErasureCodingPolicyState.ENABLED, ecPolicyState); + assertEquals(ErasureCodingPolicyState.ENABLED, ecPolicyState, + "The erasure coding policy should be of " + "enabled state"); } else { - assertEquals("The erasure coding policy should be of " + - "disabled state", ErasureCodingPolicyState.DISABLED, ecPolicyState); + assertEquals(ErasureCodingPolicyState.DISABLED, ecPolicyState, + "The erasure coding policy should be of " + "disabled state"); } - assertFalse("Policy should be in disabled state in FSImage!", - isPolicyEnabledInFsImage(targetPolicy)); + assertFalse(isPolicyEnabledInFsImage(targetPolicy), + "Policy should be in disabled state in FSImage!"); // Read file regardless of the erasure coding policy state DFSTestUtil.readFileAsBytes(fs, filePath); @@ -986,8 +975,7 @@ private void testChangeErasureCodingPolicyState(MiniDFSCluster cluster, fs.removeErasureCodingPolicy(ecPolicy.getName()); } catch (RemoteException e) { // built-in policy cannot been removed - assertTrue("Built-in policy cannot be removed", - ecPolicy.isSystemPolicy()); + assertTrue(ecPolicy.isSystemPolicy(), "Built-in policy cannot be removed"); assertExceptionContains("System erasure coding policy", e); return; } @@ -1002,11 +990,10 @@ private void testChangeErasureCodingPolicyState(MiniDFSCluster cluster, cluster.waitActive(); ecPolicy = ErasureCodingPolicyManager.getInstance().getByID( targetPolicy.getId()); - assertEquals("The erasure coding policy saved into and loaded from " + - "fsImage is bad", targetPolicy, ecPolicy); - assertEquals("The erasure coding policy should be of removed state", - ErasureCodingPolicyState.REMOVED, - DFSTestUtil.getECPolicyState(ecPolicy)); + assertEquals(targetPolicy, ecPolicy, + "The erasure coding policy saved into and loaded from " + "fsImage is bad"); + assertEquals(ErasureCodingPolicyState.REMOVED, DFSTestUtil.getECPolicyState(ecPolicy), + "The erasure coding policy should be of removed state"); // Read file regardless of the erasure coding policy state DFSTestUtil.readFileAsBytes(fs, filePath); fs.delete(dirPath, true); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java index bb03b30c86086..db77f9963c42b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java @@ -20,9 +20,9 @@ import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getFinalizedEditsFileName; import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName; import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertSame; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertSame; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.File; import java.io.IOException; @@ -30,7 +30,7 @@ import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; -import org.junit.Test; 
+import org.junit.jupiter.api.Test; public class TestFSImageStorageInspector { /** @@ -57,7 +57,6 @@ public void testCurrentStorageInspector() throws IOException { assertSame(mockDir, latestImage.sd); assertTrue(inspector.isUpgradeFinalized()); - assertEquals(new File("/foo/current/"+getImageFileName(456)), - latestImage.getFile()); + assertEquals(new File("/foo/current/" + getImageFileName(456)), latestImage.getFile()); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithAcl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithAcl.java index 3148308f3a954..0ccdd693aff82 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithAcl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithAcl.java @@ -21,6 +21,7 @@ import static org.apache.hadoop.fs.permission.AclEntryScope.*; import static org.apache.hadoop.fs.permission.AclEntryType.*; import static org.apache.hadoop.fs.permission.FsAction.*; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; import java.io.IOException; import java.util.List; @@ -34,16 +35,15 @@ import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.util.Lists; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; public class TestFSImageWithAcl { private static Configuration conf; private static MiniDFSCluster cluster; - @BeforeClass + @BeforeAll public static void setUp() throws IOException { conf = new Configuration(); conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true); @@ -51,7 +51,7 @@ public static void setUp() throws IOException { cluster.waitActive(); } - @AfterClass + @AfterAll public static void tearDown() { if (cluster != null) { cluster.shutdown(); @@ -73,9 +73,8 @@ private void testAcl(boolean persistNamespace) throws IOException { AclStatus s = cluster.getNamesystem().getAclStatus(p.toString()); AclEntry[] returned = Lists.newArrayList(s.getEntries()).toArray( new AclEntry[0]); - Assert.assertArrayEquals(new AclEntry[] { - aclEntry(ACCESS, USER, "foo", READ_EXECUTE), - aclEntry(ACCESS, GROUP, READ) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(ACCESS, USER, "foo", READ_EXECUTE), + aclEntry(ACCESS, GROUP, READ)}, returned); fs.removeAcl(p); @@ -90,14 +89,13 @@ private void testAcl(boolean persistNamespace) throws IOException { s = cluster.getNamesystem().getAclStatus(p.toString()); returned = Lists.newArrayList(s.getEntries()).toArray(new AclEntry[0]); - Assert.assertArrayEquals(new AclEntry[] { }, returned); + assertArrayEquals(new AclEntry[] {}, returned); fs.modifyAclEntries(p, Lists.newArrayList(e)); s = cluster.getNamesystem().getAclStatus(p.toString()); returned = Lists.newArrayList(s.getEntries()).toArray(new AclEntry[0]); - Assert.assertArrayEquals(new AclEntry[] { - aclEntry(ACCESS, USER, "foo", READ_EXECUTE), - aclEntry(ACCESS, GROUP, READ) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(ACCESS, USER, "foo", READ_EXECUTE), + aclEntry(ACCESS, GROUP, READ)}, returned); } @Test @@ -140,20 +138,20 @@ private void doTestDefaultAclNewChildren(boolean persistNamespace) AclEntry[] fileReturned = fs.getAclStatus(filePath).getEntries() 
.toArray(new AclEntry[0]); - Assert.assertArrayEquals(fileExpected, fileReturned); + assertArrayEquals(fileExpected, fileReturned); AclEntry[] subdirReturned = fs.getAclStatus(subdirPath).getEntries() .toArray(new AclEntry[0]); - Assert.assertArrayEquals(subdirExpected, subdirReturned); + assertArrayEquals(subdirExpected, subdirReturned); assertPermission(fs, subdirPath, permExpected); restart(fs, persistNamespace); fileReturned = fs.getAclStatus(filePath).getEntries() .toArray(new AclEntry[0]); - Assert.assertArrayEquals(fileExpected, fileReturned); + assertArrayEquals(fileExpected, fileReturned); subdirReturned = fs.getAclStatus(subdirPath).getEntries() .toArray(new AclEntry[0]); - Assert.assertArrayEquals(subdirExpected, subdirReturned); + assertArrayEquals(subdirExpected, subdirReturned); assertPermission(fs, subdirPath, permExpected); aclSpec = Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", READ_WRITE)); @@ -161,40 +159,40 @@ private void doTestDefaultAclNewChildren(boolean persistNamespace) fileReturned = fs.getAclStatus(filePath).getEntries() .toArray(new AclEntry[0]); - Assert.assertArrayEquals(fileExpected, fileReturned); + assertArrayEquals(fileExpected, fileReturned); subdirReturned = fs.getAclStatus(subdirPath).getEntries() .toArray(new AclEntry[0]); - Assert.assertArrayEquals(subdirExpected, subdirReturned); + assertArrayEquals(subdirExpected, subdirReturned); assertPermission(fs, subdirPath, permExpected); restart(fs, persistNamespace); fileReturned = fs.getAclStatus(filePath).getEntries() .toArray(new AclEntry[0]); - Assert.assertArrayEquals(fileExpected, fileReturned); + assertArrayEquals(fileExpected, fileReturned); subdirReturned = fs.getAclStatus(subdirPath).getEntries() .toArray(new AclEntry[0]); - Assert.assertArrayEquals(subdirExpected, subdirReturned); + assertArrayEquals(subdirExpected, subdirReturned); assertPermission(fs, subdirPath, permExpected); fs.removeAcl(dirPath); fileReturned = fs.getAclStatus(filePath).getEntries() .toArray(new AclEntry[0]); - Assert.assertArrayEquals(fileExpected, fileReturned); + assertArrayEquals(fileExpected, fileReturned); subdirReturned = fs.getAclStatus(subdirPath).getEntries() .toArray(new AclEntry[0]); - Assert.assertArrayEquals(subdirExpected, subdirReturned); + assertArrayEquals(subdirExpected, subdirReturned); assertPermission(fs, subdirPath, permExpected); restart(fs, persistNamespace); fileReturned = fs.getAclStatus(filePath).getEntries() .toArray(new AclEntry[0]); - Assert.assertArrayEquals(fileExpected, fileReturned); + assertArrayEquals(fileExpected, fileReturned); subdirReturned = fs.getAclStatus(subdirPath).getEntries() .toArray(new AclEntry[0]); - Assert.assertArrayEquals(subdirExpected, subdirReturned); + assertArrayEquals(subdirExpected, subdirReturned); assertPermission(fs, subdirPath, permExpected); } @@ -221,20 +219,20 @@ public void testRootACLAfterLoadingFsImage() throws IOException { AclStatus s = cluster.getNamesystem().getAclStatus(rootdir.toString()); AclEntry[] returned = Lists.newArrayList(s.getEntries()).toArray(new AclEntry[0]); - Assert.assertArrayEquals( + assertArrayEquals( new AclEntry[] { aclEntry(ACCESS, GROUP, READ_EXECUTE), - aclEntry(ACCESS, GROUP, "bar", READ), - aclEntry(ACCESS, GROUP, "foo", ALL) }, returned); + aclEntry(ACCESS, GROUP, "bar", READ), aclEntry(ACCESS, GROUP, "foo", ALL) }, + returned); // restart - hence save and load from fsimage restart(fs, true); s = cluster.getNamesystem().getAclStatus(rootdir.toString()); returned = Lists.newArrayList(s.getEntries()).toArray(new 
AclEntry[0]); - Assert.assertArrayEquals( + assertArrayEquals( new AclEntry[] { aclEntry(ACCESS, GROUP, READ_EXECUTE), - aclEntry(ACCESS, GROUP, "bar", READ), - aclEntry(ACCESS, GROUP, "foo", ALL) }, returned); + aclEntry(ACCESS, GROUP, "bar", READ), aclEntry(ACCESS, GROUP, "foo", ALL) }, + returned); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java index 89d48fb602ae0..2edb051b57ad6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java @@ -40,10 +40,10 @@ import org.apache.hadoop.hdfs.util.Canceler; import org.apache.hadoop.hdfs.util.RwLockMode; import org.apache.hadoop.test.GenericTestUtils; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import org.slf4j.event.Level; import java.io.File; @@ -54,8 +54,8 @@ import java.util.List; import java.util.Random; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; /** * Test FSImage save/load when Snapshot is supported @@ -87,13 +87,13 @@ public void createCluster() throws IOException { hdfs = cluster.getFileSystem(); } - @Before + @BeforeEach public void setUp() throws Exception { conf = new Configuration(); createCluster(); } - @After + @AfterEach public void tearDown() throws Exception { if (cluster != null) { cluster.shutdown(); @@ -202,8 +202,8 @@ public void testSnapshotOnRoot() throws Exception { fsn = cluster.getNamesystem(); hdfs = cluster.getFileSystem(); final INodeDirectory rootNode = fsn.dir.getRoot(); - assertTrue("The children list of root should be empty", - rootNode.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty()); + assertTrue(rootNode.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty(), + "The children list of root should be empty"); // one snapshot on root: s1 DiffList diffList = rootNode.getDiffs().asList(); assertEquals(1, diffList.size()); @@ -325,22 +325,23 @@ void checkImage(int s) throws IOException { long numSnapshotAfter = fsn.getNumSnapshots(); SnapshottableDirectoryStatus[] dirAfter = hdfs.getSnapshottableDirListing(); - Assert.assertEquals(numSdirBefore, numSdirAfter); - Assert.assertEquals(numSnapshotBefore, numSnapshotAfter); - Assert.assertEquals(dirBefore.length, dirAfter.length); + assertEquals(numSdirBefore, numSdirAfter); + assertEquals(numSnapshotBefore, numSnapshotAfter); + assertEquals(dirBefore.length, dirAfter.length); List pathListBefore = new ArrayList(); for (SnapshottableDirectoryStatus sBefore : dirBefore) { pathListBefore.add(sBefore.getFullPath().toString()); } for (SnapshottableDirectoryStatus sAfter : dirAfter) { - Assert.assertTrue(pathListBefore.contains(sAfter.getFullPath().toString())); + assertTrue(pathListBefore.contains(sAfter.getFullPath().toString())); } } /** * Test the fsimage saving/loading while file appending. 
*/ - @Test (timeout=60000) + @Test + @Timeout(value = 60) public void testSaveLoadImageWithAppending() throws Exception { Path sub1 = new Path(dir, "sub1"); Path sub1file1 = new Path(sub1, "sub1file1"); @@ -396,7 +397,8 @@ public void testSaveLoadImageWithAppending() throws Exception { /** * Test the fsimage loading while there is file under construction. */ - @Test (timeout=60000) + @Test + @Timeout(value = 60) public void testLoadImageWithAppending() throws Exception { Path sub1 = new Path(dir, "sub1"); Path sub1file1 = new Path(sub1, "sub1file1"); @@ -427,7 +429,8 @@ public void testLoadImageWithAppending() throws Exception { * Test fsimage loading when 1) there is an empty file loaded from fsimage, * and 2) there is later an append operation to be applied from edit log. */ - @Test (timeout=60000) + @Test + @Timeout(value = 60) public void testLoadImageWithEmptyFile() throws Exception { // create an empty file Path file = new Path(dir, "file"); @@ -469,7 +472,8 @@ public void testLoadImageWithEmptyFile() throws Exception { * we may save these files/dirs to the fsimage, and cause FileNotFound * Exception while loading fsimage. */ - @Test (timeout=300000) + @Test + @Timeout(value = 300) public void testSaveLoadImageAfterSnapshotDeletion() throws Exception { // create initial dir and subdir @@ -565,7 +569,8 @@ void deleteSnapshot(Path directory, String snapshotName) throws Exception { printTree("deleted snapshot " + snapshotName); } - @Test (timeout=60000) + @Test + @Timeout(value = 60) public void testDoubleRename() throws Exception { final Path parent = new Path("/parent"); hdfs.mkdirs(parent); @@ -639,11 +644,12 @@ String printTree(String label) throws Exception { output.println(b); final String s = NamespacePrintVisitor.print2Sting(fsn); - Assert.assertEquals(b, s); + assertEquals(b, s); return b; } - @Test (timeout=60000) + @Test + @Timeout(value = 60) public void testFSImageWithDoubleRename() throws Exception { final Path dir1 = new Path("/dir1"); final Path dir2 = new Path("/dir2"); @@ -684,7 +690,8 @@ public void testFSImageWithDoubleRename() throws Exception { } - @Test (timeout=60000) + @Test + @Timeout(value = 60) public void testFSImageWithRename1() throws Exception { final Path dir1 = new Path("/dir1"); final Path dir2 = new Path("/dir2"); @@ -729,7 +736,8 @@ public void testFSImageWithRename1() throws Exception { hdfs = cluster.getFileSystem(); } - @Test (timeout=60000) + @Test + @Timeout(value = 60) public void testFSImageWithRename2() throws Exception { final Path dir1 = new Path("/dir1"); final Path dir2 = new Path("/dir2"); @@ -770,7 +778,8 @@ public void testFSImageWithRename2() throws Exception { hdfs = cluster.getFileSystem(); } - @Test(timeout = 60000) + @Test + @Timeout(value = 60) public void testFSImageWithRename3() throws Exception { final Path dir1 = new Path("/dir1"); final Path dir2 = new Path("/dir2"); @@ -815,7 +824,8 @@ public void testFSImageWithRename3() throws Exception { hdfs = cluster.getFileSystem(); } - @Test (timeout=60000) + @Test + @Timeout(value = 60) public void testFSImageWithRename4() throws Exception { final Path dir1 = new Path("/dir1"); final Path dir2 = new Path("/dir2"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithXAttr.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithXAttr.java index 9d7b4706cfe59..8853e2f511f19 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithXAttr.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithXAttr.java @@ -30,10 +30,12 @@ import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; /** * 1) save xattrs, restart NN, assert xattrs reloaded from edit log, @@ -53,7 +55,7 @@ public class TestFSImageWithXAttr { private static final String name3 = "user.a3"; private static final byte[] value3 = {}; - @BeforeClass + @BeforeAll public static void setUp() throws IOException { conf = new Configuration(); conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true); @@ -61,7 +63,7 @@ public static void setUp() throws IOException { cluster.waitActive(); } - @AfterClass + @AfterAll public static void tearDown() { if (cluster != null) { cluster.shutdown(); @@ -80,20 +82,20 @@ private void testXAttr(boolean persistNamespace) throws IOException { restart(fs, persistNamespace); Map xattrs = fs.getXAttrs(path); - Assert.assertEquals(xattrs.size(), 3); - Assert.assertArrayEquals(value1, xattrs.get(name1)); - Assert.assertArrayEquals(value2, xattrs.get(name2)); - Assert.assertArrayEquals(value3, xattrs.get(name3)); + assertEquals(xattrs.size(), 3); + assertArrayEquals(value1, xattrs.get(name1)); + assertArrayEquals(value2, xattrs.get(name2)); + assertArrayEquals(value3, xattrs.get(name3)); fs.setXAttr(path, name1, newValue1, EnumSet.of(XAttrSetFlag.REPLACE)); restart(fs, persistNamespace); xattrs = fs.getXAttrs(path); - Assert.assertEquals(xattrs.size(), 3); - Assert.assertArrayEquals(newValue1, xattrs.get(name1)); - Assert.assertArrayEquals(value2, xattrs.get(name2)); - Assert.assertArrayEquals(value3, xattrs.get(name3)); + assertEquals(xattrs.size(), 3); + assertArrayEquals(newValue1, xattrs.get(name1)); + assertArrayEquals(value2, xattrs.get(name2)); + assertArrayEquals(value3, xattrs.get(name3)); fs.removeXAttr(path, name1); fs.removeXAttr(path, name2); @@ -101,7 +103,7 @@ private void testXAttr(boolean persistNamespace) throws IOException { restart(fs, persistNamespace); xattrs = fs.getXAttrs(path); - Assert.assertEquals(xattrs.size(), 0); + assertEquals(xattrs.size(), 0); } @Test diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java index 95b63960e3485..1848dd0bfbaaa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java @@ -32,8 +32,10 @@ import static org.apache.hadoop.fs.permission.FsAction.WRITE; import static org.apache.hadoop.fs.permission.FsAction.WRITE_EXECUTE; import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry; -import static org.junit.Assert.fail; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static 
org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.fail; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; @@ -54,9 +56,8 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; /** @@ -79,7 +80,7 @@ public class TestFSPermissionChecker { private FSDirectory dir; private INodeDirectory inodeRoot; - @Before + @BeforeEach public void setUp() throws IOException { Configuration conf = new Configuration(); FSNamesystem fsn = mock(FSNamesystem.class); @@ -421,11 +422,10 @@ private void assertPermissionDenied(UserGroupInformation user, String path, fail("expected AccessControlException for user + " + user + ", path = " + path + ", access = " + access); } catch (AccessControlException e) { - assertTrue("Permission denied messages must carry the username", - e.getMessage().contains(user.getUserName().toString())); - assertTrue("Permission denied messages must carry the path parent", - e.getMessage().contains( - new Path(path).getParent().toUri().getPath())); + assertTrue(e.getMessage().contains(user.getUserName().toString()), + "Permission denied messages must carry the username"); + assertTrue(e.getMessage().contains(new Path(path).getParent().toUri().getPath()), + "Permission denied messages must carry the path parent"); } } @@ -461,7 +461,7 @@ public void testCheckAccessControlEnforcerSlowness() throws Exception { final String m1 = FSPermissionChecker.runCheckPermission( () -> FSPermissionChecker.LOG.info("Fast runner"), checkAccessControlEnforcerSlowness); - Assert.assertNull(m1); + assertNull(m1); final String m2 = FSPermissionChecker.runCheckPermission(() -> { FSPermissionChecker.LOG.info("Slow runner"); @@ -472,6 +472,6 @@ public void testCheckAccessControlEnforcerSlowness() throws Exception { throw new IllegalStateException(e); } }, checkAccessControlEnforcerSlowness); - Assert.assertNotNull(m2); + assertNotNull(m2); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileContextAcl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileContextAcl.java index f9a6889b8f4a2..9fcf7f75ebc69 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileContextAcl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileContextAcl.java @@ -28,14 +28,14 @@ import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.hdfs.DistributedFileSystem; -import org.junit.BeforeClass; +import org.junit.jupiter.api.BeforeAll; /** * Tests for ACL operation through FileContext APIs */ public class TestFileContextAcl extends FSAclBaseTest { - @BeforeClass + @BeforeAll public static void init() throws Exception { conf = new Configuration(); startCluster(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java index 02818c952dabf..1a9f722b95e07 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; @@ -31,8 +31,8 @@ import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Test; - +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; /** * This class tests that a file system adheres to the limit of @@ -84,8 +84,7 @@ public void testFileLimit() throws IOException { // check that / exists // Path path = new Path("/"); - assertTrue("/ should be a directory", - fs.getFileStatus(path).isDirectory()); + assertTrue(fs.getFileStatus(path).isDirectory(), "/ should be a directory"); currentNodes = 1; // root inode // verify that we can create the specified number of files. We leave @@ -107,7 +106,7 @@ public void testFileLimit() throws IOException { } catch (IOException e) { hitException = true; } - assertTrue("Was able to exceed file limit", hitException); + assertTrue(hitException, "Was able to exceed file limit"); // delete one file Path file0 = new Path("/filestatus0"); @@ -147,7 +146,7 @@ public void testFileLimit() throws IOException { } catch (IOException e) { hitException = true; } - assertTrue("Was able to exceed dir limit", hitException); + assertTrue(hitException, "Was able to exceed dir limit"); } finally { fs.close(); @@ -162,7 +161,8 @@ public void testFileLimitSimulated() throws IOException { simulatedStorage = false; } - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void testMaxBlocksPerFileLimit() throws Exception { Configuration conf = new HdfsConfiguration(); // Make a small block size and a low limit @@ -192,7 +192,8 @@ public void testMaxBlocksPerFileLimit() throws Exception { } } - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void testMinBlockSizeLimit() throws Exception { final long blockSize = 4096; Configuration conf = new HdfsConfiguration(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java index c83676b245f7b..6e7ed2e82f40f 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java @@ -18,17 +18,14 @@ package org.apache.hadoop.hdfs.server.namenode; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.not; -import static org.hamcrest.core.Is.is; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static 
org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; +import static org.assertj.core.api.Assertions.assertThat; import java.io.IOException; import java.util.concurrent.ThreadLocalRandom; @@ -72,9 +69,10 @@ import org.apache.hadoop.util.Time; import org.apache.hadoop.util.ToolRunner; import org.slf4j.event.Level; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; public class TestFileTruncate { static { @@ -99,7 +97,7 @@ public class TestFileTruncate { private Path parent; - @Before + @BeforeEach public void setUp() throws IOException { conf = new HdfsConfiguration(); conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, BLOCK_SIZE); @@ -116,7 +114,7 @@ public void setUp() throws IOException { parent = new Path("/test"); } - @After + @AfterEach public void tearDown() throws IOException { if(fs != null) { fs.close(); @@ -145,22 +143,20 @@ public void testBasicTruncate() throws IOException { writeContents(contents, fileLength, p); int newLength = fileLength - toTruncate; - assertTrue("DFS supports truncate", - fs.hasPathCapability(p, CommonPathCapabilities.FS_TRUNCATE)); + assertTrue(fs.hasPathCapability(p, CommonPathCapabilities.FS_TRUNCATE), + "DFS supports truncate"); boolean isReady = fs.truncate(p, newLength); LOG.info("fileLength=" + fileLength + ", newLength=" + newLength + ", toTruncate=" + toTruncate + ", isReady=" + isReady); - assertEquals("File must be closed for zero truncate" - + " or truncating at the block boundary", - isReady, toTruncate == 0 || newLength % BLOCK_SIZE == 0); + assertEquals(isReady, toTruncate == 0 || newLength % BLOCK_SIZE == 0, + "File must be closed for zero truncate" + " or truncating at the block boundary"); if (!isReady) { checkBlockRecovery(p); } ContentSummary cs = fs.getContentSummary(parent); - assertEquals("Bad disk space usage", - cs.getSpaceConsumed(), newLength * REPLICATION); + assertEquals(cs.getSpaceConsumed(), newLength * REPLICATION, "Bad disk space usage"); // validate the file content checkFullFile(p, newLength, contents); } @@ -180,14 +176,13 @@ public void testMultipleTruncate() throws IOException { for(int n = data.length; n > 0; ) { final int newLength = ThreadLocalRandom.current().nextInt(n); - assertTrue("DFS supports truncate", - fs.hasPathCapability(p, CommonPathCapabilities.FS_TRUNCATE)); + assertTrue(fs.hasPathCapability(p, CommonPathCapabilities.FS_TRUNCATE), + "DFS supports truncate"); final boolean isReady = fs.truncate(p, newLength); LOG.info("newLength=" + newLength + ", isReady=" + isReady); - assertEquals("File must be closed for truncating at the block boundary", - isReady, newLength % BLOCK_SIZE == 0); - assertEquals("Truncate is not idempotent", - isReady, fs.truncate(p, newLength)); + assertEquals(isReady, newLength % BLOCK_SIZE == 0, + "File must be closed for truncating at the block boundary"); + assertEquals(isReady, fs.truncate(p, newLength), "Truncate is not idempotent"); if (!isReady) { checkBlockRecovery(p); } @@ -215,12 +210,12 @@ public void 
testSnapshotTruncateThenDeleteSnapshot() throws IOException { final int newLength = data.length - 1; assert newLength % BLOCK_SIZE != 0 : " newLength must not be multiple of BLOCK_SIZE"; - assertTrue("DFS supports truncate", - fs.hasPathCapability(p, CommonPathCapabilities.FS_TRUNCATE)); + assertTrue(fs.hasPathCapability(p, CommonPathCapabilities.FS_TRUNCATE), + "DFS supports truncate"); final boolean isReady = fs.truncate(p, newLength); LOG.info("newLength=" + newLength + ", isReady=" + isReady); - assertEquals("File must be closed for truncating at the block boundary", - isReady, newLength % BLOCK_SIZE == 0); + assertEquals(isReady, newLength % BLOCK_SIZE == 0, + "File must be closed for truncating at the block boundary"); fs.deleteSnapshot(dir, snapshot); if (!isReady) { checkBlockRecovery(p); @@ -233,7 +228,8 @@ public void testSnapshotTruncateThenDeleteSnapshot() throws IOException { /** * Test truncate twice together on a file. */ - @Test(timeout=90000) + @Test + @Timeout(value = 90) public void testTruncateTwiceTogether() throws Exception { Path dir = new Path("/testTruncateTwiceTogether"); @@ -359,7 +355,7 @@ void testSnapshotWithAppendTruncate(int... deleteOrder) // Diskspace consumed should be 10 bytes * 3. [blk 1,2,3] ContentSummary contentSummary = fs.getContentSummary(parent); - assertThat(contentSummary.getSpaceConsumed(), is(30L)); + assertThat(contentSummary.getSpaceConsumed()).isEqualTo(30L); // Add file to snapshot and append String[] ss = new String[] {"ss0", "ss1", "ss2", "ss3"}; @@ -372,7 +368,7 @@ void testSnapshotWithAppendTruncate(int... deleteOrder) // Diskspace consumed should be 15 bytes * 3. [blk 1,2,3,4] contentSummary = fs.getContentSummary(parent); - assertThat(contentSummary.getSpaceConsumed(), is(45L)); + assertThat(contentSummary.getSpaceConsumed()).isEqualTo(45L); // Create another snapshot without changes snapshotDir = fs.createSnapshot(parent, ss[1]); @@ -387,35 +383,35 @@ void testSnapshotWithAppendTruncate(int... deleteOrder) // Diskspace consumed should be 20 bytes * 3. [blk 1,2,3,4,5] contentSummary = fs.getContentSummary(parent); - assertThat(contentSummary.getSpaceConsumed(), is(60L)); + assertThat(contentSummary.getSpaceConsumed()).isEqualTo(60L); // Truncate to block boundary int newLength = length[0] + BLOCK_SIZE / 2; boolean isReady = fs.truncate(src, newLength); BlockManagerTestUtil.waitForMarkedDeleteQueueIsEmpty( cluster.getNamesystem(0).getBlockManager()); - assertTrue("Recovery is not expected.", isReady); + assertTrue(isReady, "Recovery is not expected."); assertFileLength(snapshotFiles[2], length[2]); assertFileLength(snapshotFiles[1], length[1]); assertFileLength(snapshotFiles[0], length[0]); assertBlockNotPresent(appendedBlk); // Diskspace consumed should be 16 bytes * 3. [blk 1,2,3 SS:4] contentSummary = fs.getContentSummary(parent); - assertThat(contentSummary.getSpaceConsumed(), is(48L)); + assertThat(contentSummary.getSpaceConsumed()).isEqualTo(48L); // Truncate full block again newLength = length[0] - BLOCK_SIZE / 2; isReady = fs.truncate(src, newLength); - assertTrue("Recovery is not expected.", isReady); + assertTrue(isReady, "Recovery is not expected."); assertFileLength(snapshotFiles[2], length[2]); assertFileLength(snapshotFiles[1], length[1]); assertFileLength(snapshotFiles[0], length[0]); // Diskspace consumed should be 16 bytes * 3. 
[blk 1,2 SS:3,4] contentSummary = fs.getContentSummary(parent); - assertThat(contentSummary.getSpaceConsumed(), is(48L)); + assertThat(contentSummary.getSpaceConsumed()).isEqualTo(48L); // Truncate half of the last block newLength -= BLOCK_SIZE / 2; isReady = fs.truncate(src, newLength); - assertFalse("Recovery is expected.", isReady); + assertFalse(isReady, "Recovery is expected."); checkBlockRecovery(src); assertFileLength(snapshotFiles[2], length[2]); assertFileLength(snapshotFiles[1], length[1]); @@ -424,20 +420,19 @@ void testSnapshotWithAppendTruncate(int... deleteOrder) .getBlock().getLocalBlock(); // Diskspace consumed should be 16 bytes * 3. [blk 1,6 SS:2,3,4] contentSummary = fs.getContentSummary(parent); - assertThat(contentSummary.getSpaceConsumed(), is(54L)); + assertThat(contentSummary.getSpaceConsumed()).isEqualTo(54L); snapshotDir = fs.createSnapshot(parent, ss[3]); snapshotFiles[3] = new Path(snapshotDir, truncateFile); length[3] = newLength; // Delete file. Should still be able to read snapshots int numINodes = fsDir.getInodeMapSize(); isReady = fs.delete(src, false); - assertTrue("Delete failed.", isReady); + assertTrue(isReady, "Delete failed."); assertFileLength(snapshotFiles[3], length[3]); assertFileLength(snapshotFiles[2], length[2]); assertFileLength(snapshotFiles[1], length[1]); assertFileLength(snapshotFiles[0], length[0]); - assertEquals("Number of INodes should not change", - numINodes, fsDir.getInodeMapSize()); + assertEquals(numINodes, fsDir.getInodeMapSize(), "Number of INodes should not change"); fs.deleteSnapshot(parent, ss[3]); BlockManagerTestUtil.waitForMarkedDeleteQueueIsEmpty( cluster.getNamesystem(0).getBlockManager()); @@ -446,18 +441,17 @@ void testSnapshotWithAppendTruncate(int... deleteOrder) assertBlockNotPresent(replacedBlk); // Diskspace consumed should be 16 bytes * 3. [SS:1,2,3,4] contentSummary = fs.getContentSummary(parent); - assertThat(contentSummary.getSpaceConsumed(), is(48L)); + assertThat(contentSummary.getSpaceConsumed()).isEqualTo(48L); // delete snapshots in the specified order fs.deleteSnapshot(parent, ss[deleteOrder[0]]); assertFileLength(snapshotFiles[deleteOrder[1]], length[deleteOrder[1]]); assertFileLength(snapshotFiles[deleteOrder[2]], length[deleteOrder[2]]); assertBlockExists(firstBlk); assertBlockExists(lastBlk); - assertEquals("Number of INodes should not change", - numINodes, fsDir.getInodeMapSize()); + assertEquals(numINodes, fsDir.getInodeMapSize(), "Number of INodes should not change"); // Diskspace consumed should be 16 bytes * 3. [SS:1,2,3,4] contentSummary = fs.getContentSummary(parent); - assertThat(contentSummary.getSpaceConsumed(), is(48L)); + assertThat(contentSummary.getSpaceConsumed()).isEqualTo(48L); fs.deleteSnapshot(parent, ss[deleteOrder[1]]); BlockManagerTestUtil.waitForMarkedDeleteQueueIsEmpty( cluster.getNamesystem(0).getBlockManager()); @@ -467,13 +461,12 @@ void testSnapshotWithAppendTruncate(int... deleteOrder) if(fs.exists(snapshotFiles[0])) { // Diskspace consumed should be 0 bytes * 3. [SS:1,2,3] assertBlockNotPresent(lastBlk); - assertThat(contentSummary.getSpaceConsumed(), is(36L)); + assertThat(contentSummary.getSpaceConsumed()).isEqualTo(36L); } else { // Diskspace consumed should be 48 bytes * 3. 
[SS:1,2,3,4] - assertThat(contentSummary.getSpaceConsumed(), is(48L)); + assertThat(contentSummary.getSpaceConsumed()).isEqualTo(48L); } - assertEquals("Number of INodes should not change", - numINodes, fsDir .getInodeMapSize()); + assertEquals(numINodes, fsDir.getInodeMapSize(), "Number of INodes should not change"); fs.deleteSnapshot(parent, ss[deleteOrder[2]]); BlockManagerTestUtil.waitForMarkedDeleteQueueIsEmpty( cluster.getNamesystem(0).getBlockManager()); @@ -481,9 +474,8 @@ void testSnapshotWithAppendTruncate(int... deleteOrder) assertBlockNotPresent(lastBlk); // Diskspace consumed should be 0 bytes * 3. [] contentSummary = fs.getContentSummary(parent); - assertThat(contentSummary.getSpaceConsumed(), is(0L)); - assertNotEquals("Number of INodes should change", - numINodes, fsDir.getInodeMapSize()); + assertThat(contentSummary.getSpaceConsumed()).isEqualTo(0L); + assertNotEquals(numINodes, fsDir.getInodeMapSize(), "Number of INodes should change"); } /** @@ -519,7 +511,7 @@ void testSnapshotWithTruncates(int... deleteOrder) // Diskspace consumed should be 12 bytes * 3. [blk 1,2,3] ContentSummary contentSummary = fs.getContentSummary(parent); - assertThat(contentSummary.getSpaceConsumed(), is(36L)); + assertThat(contentSummary.getSpaceConsumed()).isEqualTo(36L); // Add file to snapshot and append String[] ss = new String[] {"ss0", "ss1", "ss2"}; @@ -527,18 +519,18 @@ void testSnapshotWithTruncates(int... deleteOrder) snapshotFiles[0] = new Path(snapshotDir, truncateFile); length[1] = 2 * BLOCK_SIZE; boolean isReady = fs.truncate(src, 2 * BLOCK_SIZE); - assertTrue("Recovery is not expected.", isReady); + assertTrue(isReady, "Recovery is not expected."); // Diskspace consumed should be 12 bytes * 3. [blk 1,2 SS:3] contentSummary = fs.getContentSummary(parent); - assertThat(contentSummary.getSpaceConsumed(), is(36L)); + assertThat(contentSummary.getSpaceConsumed()).isEqualTo(36L); snapshotDir = fs.createSnapshot(parent, ss[1]); snapshotFiles[1] = new Path(snapshotDir, truncateFile); // Create another snapshot with truncate length[2] = BLOCK_SIZE + BLOCK_SIZE / 2; isReady = fs.truncate(src, BLOCK_SIZE + BLOCK_SIZE / 2); - assertFalse("Recovery is expected.", isReady); + assertFalse(isReady, "Recovery is expected."); checkBlockRecovery(src); snapshotDir = fs.createSnapshot(parent, ss[2]); snapshotFiles[2] = new Path(snapshotDir, truncateFile); @@ -547,7 +539,7 @@ void testSnapshotWithTruncates(int... deleteOrder) // Diskspace consumed should be 14 bytes * 3. [blk 1,4 SS:2,3] contentSummary = fs.getContentSummary(parent); - assertThat(contentSummary.getSpaceConsumed(), is(42L)); + assertThat(contentSummary.getSpaceConsumed()).isEqualTo(42L); fs.deleteSnapshot(parent, ss[deleteOrder[0]]); BlockManagerTestUtil.waitForMarkedDeleteQueueIsEmpty( @@ -560,11 +552,11 @@ void testSnapshotWithTruncates(int... deleteOrder) contentSummary = fs.getContentSummary(parent); if(fs.exists(snapshotFiles[0])) { // Diskspace consumed should be 14 bytes * 3. [blk 1,4 SS:2,3] - assertThat(contentSummary.getSpaceConsumed(), is(42L)); + assertThat(contentSummary.getSpaceConsumed()).isEqualTo(42L); assertBlockExists(lastBlk); } else { // Diskspace consumed should be 10 bytes * 3. [blk 1,4 SS:2] - assertThat(contentSummary.getSpaceConsumed(), is(30L)); + assertThat(contentSummary.getSpaceConsumed()).isEqualTo(30L); assertBlockNotPresent(lastBlk); } @@ -578,15 +570,15 @@ void testSnapshotWithTruncates(int... 
deleteOrder) contentSummary = fs.getContentSummary(parent); if(fs.exists(snapshotFiles[0])) { // Diskspace consumed should be 14 bytes * 3. [blk 1,4 SS:2,3] - assertThat(contentSummary.getSpaceConsumed(), is(42L)); + assertThat(contentSummary.getSpaceConsumed()).isEqualTo(42L); assertBlockExists(lastBlk); } else if(fs.exists(snapshotFiles[1])) { // Diskspace consumed should be 10 bytes * 3. [blk 1,4 SS:2] - assertThat(contentSummary.getSpaceConsumed(), is(30L)); + assertThat(contentSummary.getSpaceConsumed()).isEqualTo(30L); assertBlockNotPresent(lastBlk); } else { // Diskspace consumed should be 6 bytes * 3. [blk 1,4 SS:] - assertThat(contentSummary.getSpaceConsumed(), is(18L)); + assertThat(contentSummary.getSpaceConsumed()).isEqualTo(18L); assertBlockNotPresent(lastBlk); } @@ -598,8 +590,8 @@ void testSnapshotWithTruncates(int... deleteOrder) // Diskspace consumed should be 6 bytes * 3. [blk 1,4 SS:] contentSummary = fs.getContentSummary(parent); - assertThat(contentSummary.getSpaceConsumed(), is(18L)); - assertThat(contentSummary.getLength(), is(6L)); + assertThat(contentSummary.getSpaceConsumed()).isEqualTo(18L); + assertThat(contentSummary.getLength()).isEqualTo(6L); fs.delete(src, false); BlockManagerTestUtil.waitForMarkedDeleteQueueIsEmpty( @@ -608,7 +600,7 @@ void testSnapshotWithTruncates(int... deleteOrder) // Diskspace consumed should be 0 bytes * 3. [] contentSummary = fs.getContentSummary(parent); - assertThat(contentSummary.getSpaceConsumed(), is(0L)); + assertThat(contentSummary.getSpaceConsumed()).isEqualTo(0L); } /** @@ -706,8 +698,9 @@ public void testTruncateFailure() throws IOException { int newLength = startingFileSize - toTruncate; boolean isReady = fs.truncate(p, newLength); - assertThat("truncate should have triggered block recovery.", - isReady, is(false)); + assertThat(isReady) + .as("truncate should have triggered block recovery.") + .isFalse(); { try { @@ -730,8 +723,10 @@ public void testTruncateFailure() throws IOException { } try { Thread.sleep(SLEEP); } catch (InterruptedException ignored) {} } - assertThat("lease recovery should have occurred in ~" + - SLEEP * RECOVERY_ATTEMPTS + " ms.", recoveryTriggered, is(true)); + assertThat(recoveryTriggered) + .as("lease recovery should have occurred in ~" + + SLEEP * RECOVERY_ATTEMPTS + " ms.") + .isTrue(); cluster.startDataNodes(conf, DATANODE_NUM, true, StartupOption.REGULAR, null); cluster.waitActive(); @@ -751,7 +746,8 @@ public void testTruncateFailure() throws IOException { * The last block is truncated at mid. (non copy-on-truncate) * dn0 is shutdown before truncate and restart after truncate successful. */ - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void testTruncateWithDataNodesRestart() throws Exception { int startingFileSize = 3 * BLOCK_SIZE; byte[] contents = AppendTestUtil.initBuffer(startingFileSize); @@ -773,8 +769,7 @@ public void testTruncateWithDataNodesRestart() throws Exception { * GS should increase. * The truncated block will be replicated to dn0 after it restarts. 
*/ - assertEquals(newBlock.getBlock().getBlockId(), - oldBlock.getBlock().getBlockId()); + assertEquals(newBlock.getBlock().getBlockId(), oldBlock.getBlock().getBlockId()); assertEquals(newBlock.getBlock().getGenerationStamp(), oldBlock.getBlock().getGenerationStamp() + 1); @@ -785,14 +780,13 @@ public void testTruncateWithDataNodesRestart() throws Exception { DFSTestUtil.waitReplication(fs, p, REPLICATION); // Old replica is disregarded and replaced with the truncated one FsDatasetTestUtils utils = cluster.getFsDatasetTestUtils(dn); - assertEquals(utils.getStoredDataLength(newBlock.getBlock()), - newBlock.getBlockSize()); + assertEquals(utils.getStoredDataLength(newBlock.getBlock()), newBlock.getBlockSize()); assertEquals(utils.getStoredGenerationStamp(newBlock.getBlock()), newBlock.getBlock().getGenerationStamp()); // Validate the file FileStatus fileStatus = fs.getFileStatus(p); - assertThat(fileStatus.getLen(), is((long) newLength)); + assertThat(fileStatus.getLen()).isEqualTo((long) newLength); checkFullFile(p, newLength, contents); fs.delete(parent, true); @@ -802,7 +796,8 @@ public void testTruncateWithDataNodesRestart() throws Exception { * The last block is truncated at mid. (copy-on-truncate) * dn1 is shutdown before truncate and restart after truncate successful. */ - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void testCopyOnTruncateWithDataNodesRestart() throws Exception { int startingFileSize = 3 * BLOCK_SIZE; byte[] contents = AppendTestUtil.initBuffer(startingFileSize); @@ -825,8 +820,7 @@ public void testCopyOnTruncateWithDataNodesRestart() throws Exception { * For copy-on-truncate, new block is made with new block id and new GS. * The replicas of the new block is 2, then it will be replicated to dn1. */ - assertNotEquals(newBlock.getBlock().getBlockId(), - oldBlock.getBlock().getBlockId()); + assertNotEquals(newBlock.getBlock().getBlockId(), oldBlock.getBlock().getBlockId()); assertEquals(newBlock.getBlock().getGenerationStamp(), oldBlock.getBlock().getGenerationStamp() + 1); @@ -834,17 +828,15 @@ public void testCopyOnTruncateWithDataNodesRestart() throws Exception { DFSTestUtil.waitReplication(fs, p, REPLICATION); FsDatasetTestUtils utils = cluster.getFsDatasetTestUtils(dn); // New block is replicated to dn1 - assertEquals(utils.getStoredDataLength(newBlock.getBlock()), - newBlock.getBlockSize()); + assertEquals(utils.getStoredDataLength(newBlock.getBlock()), newBlock.getBlockSize()); // Old replica exists too since there is snapshot - assertEquals(utils.getStoredDataLength(oldBlock.getBlock()), - oldBlock.getBlockSize()); + assertEquals(utils.getStoredDataLength(oldBlock.getBlock()), oldBlock.getBlockSize()); assertEquals(utils.getStoredGenerationStamp(oldBlock.getBlock()), oldBlock.getBlock().getGenerationStamp()); // Validate the file FileStatus fileStatus = fs.getFileStatus(p); - assertThat(fileStatus.getLen(), is((long) newLength)); + assertThat(fileStatus.getLen()).isEqualTo((long) newLength); checkFullFile(p, newLength, contents); fs.deleteSnapshot(parent, "ss0"); @@ -855,7 +847,8 @@ public void testCopyOnTruncateWithDataNodesRestart() throws Exception { * The last block is truncated at mid. (non copy-on-truncate) * dn0, dn1 are restarted immediately after truncate. 
*/ - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void testTruncateWithDataNodesRestartImmediately() throws Exception { int startingFileSize = 3 * BLOCK_SIZE; byte[] contents = AppendTestUtil.initBuffer(startingFileSize); @@ -881,8 +874,7 @@ public void testTruncateWithDataNodesRestartImmediately() throws Exception { * For non copy-on-truncate, the truncated block id is the same, but the * GS should increase. */ - assertEquals(newBlock.getBlock().getBlockId(), - oldBlock.getBlock().getBlockId()); + assertEquals(newBlock.getBlock().getBlockId(), oldBlock.getBlock().getBlockId()); assertEquals(newBlock.getBlock().getGenerationStamp(), oldBlock.getBlock().getGenerationStamp() + 1); @@ -893,21 +885,19 @@ public void testTruncateWithDataNodesRestartImmediately() throws Exception { DFSTestUtil.waitReplication(fs, p, REPLICATION); // Old replica is disregarded and replaced with the truncated one on dn0 FsDatasetTestUtils utils = cluster.getFsDatasetTestUtils(dn0); - assertEquals(utils.getStoredDataLength(newBlock.getBlock()), - newBlock.getBlockSize()); + assertEquals(utils.getStoredDataLength(newBlock.getBlock()), newBlock.getBlockSize()); assertEquals(utils.getStoredGenerationStamp(newBlock.getBlock()), newBlock.getBlock().getGenerationStamp()); // Old replica is disregarded and replaced with the truncated one on dn1 utils = cluster.getFsDatasetTestUtils(dn1); - assertEquals(utils.getStoredDataLength(newBlock.getBlock()), - newBlock.getBlockSize()); + assertEquals(utils.getStoredDataLength(newBlock.getBlock()), newBlock.getBlockSize()); assertEquals(utils.getStoredGenerationStamp(newBlock.getBlock()), newBlock.getBlock().getGenerationStamp()); // Validate the file FileStatus fileStatus = fs.getFileStatus(p); - assertThat(fileStatus.getLen(), is((long) newLength)); + assertThat(fileStatus.getLen()).isEqualTo((long) newLength); checkFullFile(p, newLength, contents); fs.delete(parent, true); @@ -917,7 +907,8 @@ public void testTruncateWithDataNodesRestartImmediately() throws Exception { * The last block is truncated at mid. (non copy-on-truncate) * shutdown the datanodes immediately after truncate. 
*/ - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void testTruncateWithDataNodesShutdownImmediately() throws Exception { int startingFileSize = 3 * BLOCK_SIZE; byte[] contents = AppendTestUtil.initBuffer(startingFileSize); @@ -936,7 +927,7 @@ public void testTruncateWithDataNodesShutdownImmediately() throws Exception { for(int i = 0; i < SUCCESS_ATTEMPTS && cluster.isDataNodeUp(); i++) { Thread.sleep(SLEEP); } - assertFalse("All DataNodes should be down.", cluster.isDataNodeUp()); + assertFalse(cluster.isDataNodeUp(), "All DataNodes should be down."); LocatedBlocks blocks = getLocatedBlocks(p); assertTrue(blocks.isUnderConstruction()); } finally { @@ -968,9 +959,9 @@ public void testTruncateEditLogLoad() throws IOException { int newLength = startingFileSize - toTruncate; boolean isReady = fs.truncate(p, newLength); - assertThat("truncate should have triggered block recovery.", - isReady, is(false)); - + assertThat(isReady) + .as("truncate should have triggered block recovery.") + .isFalse(); cluster.restartNameNode(); String holder = UserGroupInformation.getCurrentUser().getUserName(); @@ -1001,9 +992,9 @@ public void testUpgradeAndRestart() throws IOException { int newLengthBeforeUpgrade = startingFileSize - toTruncate; boolean isReady = fs.truncate(p, newLengthBeforeUpgrade); - assertThat("truncate should have triggered block recovery.", - isReady, is(false)); - + assertThat(isReady) + .as("truncate should have triggered block recovery.") + .isFalse(); checkBlockRecovery(p); checkFullFile(p, newLengthBeforeUpgrade, contents); @@ -1012,43 +1003,49 @@ public void testUpgradeAndRestart() throws IOException { restartCluster(StartupOption.UPGRADE); - assertThat("SafeMode should be OFF", - cluster.getNamesystem().isInSafeMode(), is(false)); - assertThat("NameNode should be performing upgrade.", - cluster.getNamesystem().isUpgradeFinalized(), is(false)); + assertThat(cluster.getNamesystem().isInSafeMode()) + .as("SafeMode should be OFF") + .isFalse(); + assertThat(cluster.getNamesystem().isUpgradeFinalized()) + .as("NameNode should be performing upgrade.") + .isFalse(); FileStatus fileStatus = fs.getFileStatus(p); - assertThat(fileStatus.getLen(), is((long) newLengthBeforeUpgrade)); + assertThat(fileStatus.getLen()).isEqualTo((long) newLengthBeforeUpgrade); int newLengthAfterUpgrade = newLengthBeforeUpgrade - toTruncate; Block oldBlk = getLocatedBlocks(p).getLastLocatedBlock() .getBlock().getLocalBlock(); isReady = fs.truncate(p, newLengthAfterUpgrade); - assertThat("truncate should have triggered block recovery.", - isReady, is(false)); + assertThat(isReady) + .as("truncate should have triggered block recovery.") + .isFalse(); fileStatus = fs.getFileStatus(p); - assertThat(fileStatus.getLen(), is((long) newLengthAfterUpgrade)); - assertThat("Should copy on truncate during upgrade", - getLocatedBlocks(p).getLastLocatedBlock().getBlock() - .getLocalBlock().getBlockId(), is(not(equalTo(oldBlk.getBlockId())))); + assertThat(fileStatus.getLen()).isEqualTo((long) newLengthAfterUpgrade); + assertThat(getLocatedBlocks(p).getLastLocatedBlock().getBlock() + .getLocalBlock().getBlockId()) + .as("Should copy on truncate during upgrade") + .isNotEqualTo(oldBlk.getBlockId()); checkBlockRecovery(p); checkFullFile(p, newLengthAfterUpgrade, contents); - assertThat("Total block count should be unchanged from copy-on-truncate", - cluster.getNamesystem().getBlocksTotal(), is(totalBlockBefore)); + assertThat(cluster.getNamesystem().getBlocksTotal()) + .as("Total block count should be unchanged from 
copy-on-truncate") + .isEqualTo(totalBlockBefore); restartCluster(StartupOption.ROLLBACK); - assertThat("File does not exist " + p, fs.exists(p), is(true)); + assertThat(fs.exists(p)).as("File does not exist " + p).isTrue(); fileStatus = fs.getFileStatus(p); - assertThat(fileStatus.getLen(), is((long) newLengthBeforeUpgrade)); + assertThat(fileStatus.getLen()).isEqualTo((long) newLengthBeforeUpgrade); checkFullFile(p, newLengthBeforeUpgrade, contents); - assertThat("Total block count should be unchanged from rolling back", - cluster.getNamesystem().getBlocksTotal(), is(totalBlockBefore)); - + assertThat(cluster.getNamesystem().getBlocksTotal()) + .as("Total block count should be unchanged from rolling back") + .isEqualTo(totalBlockBefore); restartCluster(StartupOption.REGULAR); - assertThat("Total block count should be unchanged from start-up", - cluster.getNamesystem().getBlocksTotal(), is(totalBlockBefore)); + assertThat(cluster.getNamesystem().getBlocksTotal()) + .as("Total block count should be unchanged from start-up") + .isEqualTo(totalBlockBefore); checkFullFile(p, newLengthBeforeUpgrade, contents); assertFileLength(snapshotFile, startingFileSize); @@ -1056,14 +1053,17 @@ public void testUpgradeAndRestart() throws IOException { fs.setSafeMode(SafeModeAction.ENTER); fs.saveNamespace(); cluster.restartNameNode(true); - assertThat("Total block count should be unchanged from start-up", - cluster.getNamesystem().getBlocksTotal(), is(totalBlockBefore)); + assertThat(cluster.getNamesystem().getBlocksTotal()) + .as("Total block count should be unchanged from start-up") + .isEqualTo(totalBlockBefore); checkFullFile(p, newLengthBeforeUpgrade, contents); assertFileLength(snapshotFile, startingFileSize); fs.deleteSnapshot(parent, "ss0"); fs.delete(parent, true); - assertThat("File " + p + " shouldn't exist", fs.exists(p), is(false)); + assertThat(fs.exists(p)) + .as("File " + p + " shouldn't exist") + .isFalse(); } /** @@ -1090,17 +1090,15 @@ public void testTruncateRecovery() throws IOException { Block truncateBlock = FSDirTruncateOp.prepareFileForTruncate(fsn, iip, client, clientMachine, 1, null); // In-place truncate uses old block id with new genStamp. 
- assertThat(truncateBlock.getBlockId(), - is(equalTo(oldBlock.getBlockId()))); - assertThat(truncateBlock.getNumBytes(), - is(oldBlock.getNumBytes())); - assertThat(truncateBlock.getGenerationStamp(), - is(fsn.getBlockManager().getBlockIdManager().getGenerationStamp())); - assertThat(file.getLastBlock().getBlockUCState(), - is(HdfsServerConstants.BlockUCState.UNDER_RECOVERY)); + assertThat(truncateBlock.getBlockId()).isEqualTo(oldBlock.getBlockId()); + assertThat(truncateBlock.getNumBytes()).isEqualTo(oldBlock.getNumBytes()); + assertThat(truncateBlock.getGenerationStamp()) + .isEqualTo(fsn.getBlockManager().getBlockIdManager().getGenerationStamp()); + assertThat(file.getLastBlock().getBlockUCState()) + .isEqualTo(HdfsServerConstants.BlockUCState.UNDER_RECOVERY); long blockRecoveryId = file.getLastBlock().getUnderConstructionFeature() .getBlockRecoveryId(); - assertThat(blockRecoveryId, is(initialGenStamp + 1)); + assertThat(blockRecoveryId).isEqualTo(initialGenStamp + 1); fsn.getEditLog().logTruncate( src, client, clientMachine, BLOCK_SIZE-1, Time.now(), truncateBlock); } finally { @@ -1115,7 +1113,7 @@ public void testTruncateRecovery() throws IOException { file = iip.getLastINode().asFile(); file.recordModification(iip.getLatestSnapshotId(), true); assertThat(file.isBlockInLatestSnapshot( - (BlockInfoContiguous) file.getLastBlock()), is(true)); + (BlockInfoContiguous) file.getLastBlock())).isEqualTo(true); initialGenStamp = file.getLastBlock().getGenerationStamp(); // Test that prepareFileForTruncate sets up copy-on-write truncate fsn.writeLock(RwLockMode.GLOBAL); @@ -1124,17 +1122,15 @@ public void testTruncateRecovery() throws IOException { Block truncateBlock = FSDirTruncateOp.prepareFileForTruncate(fsn, iip, client, clientMachine, 1, null); // Copy-on-write truncate makes new block with new id and genStamp - assertThat(truncateBlock.getBlockId(), - is(not(equalTo(oldBlock.getBlockId())))); - assertThat(truncateBlock.getNumBytes() < oldBlock.getNumBytes(), - is(true)); - assertThat(truncateBlock.getGenerationStamp(), - is(fsn.getBlockManager().getBlockIdManager().getGenerationStamp())); - assertThat(file.getLastBlock().getBlockUCState(), - is(HdfsServerConstants.BlockUCState.UNDER_RECOVERY)); + assertThat(truncateBlock.getBlockId()).isNotEqualTo(oldBlock.getBlockId()); + assertThat(truncateBlock.getNumBytes() < oldBlock.getNumBytes()).isEqualTo(true); + assertThat(truncateBlock.getGenerationStamp()) + .isEqualTo(fsn.getBlockManager().getBlockIdManager().getGenerationStamp()); + assertThat(file.getLastBlock().getBlockUCState()) + .isEqualTo(HdfsServerConstants.BlockUCState.UNDER_RECOVERY); long blockRecoveryId = file.getLastBlock().getUnderConstructionFeature() .getBlockRecoveryId(); - assertThat(blockRecoveryId, is(initialGenStamp + 1)); + assertThat(blockRecoveryId).isEqualTo(initialGenStamp + 1); fsn.getEditLog().logTruncate( src, client, clientMachine, BLOCK_SIZE-1, Time.now(), truncateBlock); } finally { @@ -1157,7 +1153,7 @@ public void testTruncateShellCommand() throws Exception { // wait for block recovery checkBlockRecovery(src); - assertThat(fs.getFileStatus(src).getLen(), is((long) newLength)); + assertThat(fs.getFileStatus(src).getLen()).isEqualTo((long) newLength); fs.delete(parent, true); } @@ -1172,7 +1168,7 @@ public void testTruncateShellCommandOnBlockBoundary() throws Exception { runTruncateShellCommand(src, oldLength, argv); // shouldn't need to wait for block recovery - assertThat(fs.getFileStatus(src).getLen(), is((long) newLength)); + 
assertThat(fs.getFileStatus(src).getLen()).isEqualTo((long) newLength); fs.delete(parent, true); } @@ -1187,7 +1183,7 @@ public void testTruncateShellCommandWithWaitOption() throws Exception { runTruncateShellCommand(src, oldLength, argv); // shouldn't need to wait for block recovery - assertThat(fs.getFileStatus(src).getLen(), is((long) newLength)); + assertThat(fs.getFileStatus(src).getLen()).isEqualTo((long) newLength); fs.delete(parent, true); } @@ -1195,13 +1191,13 @@ private void runTruncateShellCommand(Path src, int oldLength, String[] shellOpts) throws Exception { // create file and write data writeContents(AppendTestUtil.initBuffer(oldLength), oldLength, src); - assertThat(fs.getFileStatus(src).getLen(), is((long)oldLength)); + assertThat(fs.getFileStatus(src).getLen()).isEqualTo((long) oldLength); // truncate file using shell FsShell shell = null; try { shell = new FsShell(conf); - assertThat(ToolRunner.run(shell, shellOpts), is(0)); + assertThat(ToolRunner.run(shell, shellOpts)).isEqualTo(0); } finally { if(shell != null) { shell.close(); @@ -1224,14 +1220,13 @@ public void testTruncate4Symlink() throws IOException { final int newLength = fileLength/3; boolean isReady = fs.truncate(link, newLength); - assertTrue("Recovery is not expected.", isReady); + assertTrue(isReady, "Recovery is not expected."); FileStatus fileStatus = fs.getFileStatus(file); - assertThat(fileStatus.getLen(), is((long) newLength)); + assertThat(fileStatus.getLen()).isEqualTo((long) newLength); ContentSummary cs = fs.getContentSummary(parent); - assertEquals("Bad disk space usage", - cs.getSpaceConsumed(), newLength * REPLICATION); + assertEquals(cs.getSpaceConsumed(), newLength * REPLICATION, "Bad disk space usage"); // validate the file content checkFullFile(file, newLength, contents); @@ -1250,7 +1245,7 @@ public void testTruncateWithRollingUpgrade() throws Exception { //start rolling upgrade dfs.setSafeMode(SafeModeAction.ENTER); int status = dfsadmin.run(new String[]{"-rollingUpgrade", "prepare"}); - assertEquals("could not prepare for rolling upgrade", 0, status); + assertEquals(0, status, "could not prepare for rolling upgrade"); dfs.setSafeMode(SafeModeAction.LEAVE); Path dir = new Path("/testTruncateWithRollingUpgrade"); @@ -1260,23 +1255,20 @@ public void testTruncateWithRollingUpgrade() throws Exception { ThreadLocalRandom.current().nextBytes(data); writeContents(data, data.length, p); - assertEquals("block num should 1", 1, - cluster.getNamesystem().getFSDirectory().getBlockManager() - .getTotalBlocks()); + assertEquals(1, cluster.getNamesystem().getFSDirectory().getBlockManager().getTotalBlocks(), + "block num should 1"); final boolean isReady = fs.truncate(p, 2); - assertFalse("should be copy-on-truncate", isReady); - assertEquals("block num should 2", 2, - cluster.getNamesystem().getFSDirectory().getBlockManager() - .getTotalBlocks()); + assertFalse(isReady, "should be copy-on-truncate"); + assertEquals(2, cluster.getNamesystem().getFSDirectory().getBlockManager().getTotalBlocks(), + "block num should 2"); fs.delete(p, true); BlockManagerTestUtil.waitForMarkedDeleteQueueIsEmpty( cluster.getNamesystem().getBlockManager()); - assertEquals("block num should 0", 0, - cluster.getNamesystem().getFSDirectory().getBlockManager() - .getTotalBlocks()); + assertEquals(0, cluster.getNamesystem().getFSDirectory().getBlockManager().getTotalBlocks(), + "block num should 0"); status = dfsadmin.run(new String[]{"-rollingUpgrade", "finalize"}); - assertEquals("could not finalize rolling upgrade", 0, status); + 
assertEquals(0, status, "could not finalize rolling upgrade"); } static void writeContents(byte[] contents, int fileLength, Path p) @@ -1309,8 +1301,9 @@ public static void checkBlockRecovery(Path p, DistributedFileSystem dfs, } try { Thread.sleep(sleepMs); } catch (InterruptedException ignored) {} } - assertThat("inode should complete in ~" + sleepMs * attempts + " ms.", - success, is(true)); + assertThat(success) + .as("inode should complete in ~" + sleepMs * attempts + " ms.") + .isTrue(); } static LocatedBlocks getLocatedBlocks(Path src) throws IOException { @@ -1323,18 +1316,18 @@ static LocatedBlocks getLocatedBlocks(Path src, DistributedFileSystem dfs) } static void assertBlockExists(Block blk) { - assertNotNull("BlocksMap does not contain block: " + blk, - cluster.getNamesystem().getStoredBlock(blk)); + assertNotNull(cluster.getNamesystem().getStoredBlock(blk), + "BlocksMap does not contain block: " + blk); } static void assertBlockNotPresent(Block blk) { - assertNull("BlocksMap should not contain block: " + blk, - cluster.getNamesystem().getStoredBlock(blk)); + assertNull(cluster.getNamesystem().getStoredBlock(blk), + "BlocksMap should not contain block: " + blk); } static void assertFileLength(Path file, long length) throws IOException { byte[] data = DFSTestUtil.readFileBuffer(fs, file); - assertEquals("Wrong data size in snapshot.", length, data.length); + assertEquals(length, data.length, "Wrong data size in snapshot."); } static void checkFullFile(Path p, int newLength, byte[] contents) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsImageValidation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsImageValidation.java index 029e11e806ea8..c82a50b91bf9c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsImageValidation.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsImageValidation.java @@ -21,12 +21,15 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.HAUtil; import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.event.Level; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + public class TestFsImageValidation { static final Logger LOG = LoggerFactory.getLogger( TestFsImageValidation.class); @@ -51,7 +54,7 @@ public void testValidation() throws Exception { try { final int errorCount = FsImageValidation.newInstance().run(); - Assert.assertEquals("Error Count: " + errorCount, 0, errorCount); + assertEquals(0, errorCount, "Error Count: " + errorCount); } catch (HadoopIllegalArgumentException e) { LOG.warn("The environment variable " + FsImageValidation.FS_IMAGE + " is not set", e); @@ -63,7 +66,7 @@ public void testHaConf() { final Configuration conf = new Configuration(); final String nsId = "cluster0"; FsImageValidation.setHaConf(nsId, conf); - Assert.assertTrue(HAUtil.isHAEnabled(conf, nsId)); + assertTrue(HAUtil.isHAEnabled(conf, nsId)); } @Test @@ -81,14 +84,14 @@ static void runTestToCommaSeparatedNumber(long n) { LOG.info("{} ?= {}", n, s); for(int i = s.length(); i > 0;) { for(int j = 0; j < 3 && i > 0; j++) { - 
Assert.assertTrue(Character.isDigit(s.charAt(--i))); + assertTrue(Character.isDigit(s.charAt(--i))); } if (i > 0) { - Assert.assertEquals(',', s.charAt(--i)); + assertEquals(',', s.charAt(--i)); } } - Assert.assertNotEquals(0, s.length()%4); - Assert.assertEquals(n, Long.parseLong(s.replaceAll(",", ""))); + assertNotEquals(0, s.length() % 4); + assertEquals(n, Long.parseLong(s.replaceAll(",", ""))); } } \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java index dd9ca22751021..79651211d1c63 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java @@ -19,8 +19,8 @@ package org.apache.hadoop.hdfs.server.namenode; import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; @@ -39,8 +39,8 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; public class TestFsLimits { static Configuration conf; @@ -59,7 +59,7 @@ static private FSNamesystem getMockNamesystem() throws IOException { return fsn; } - @Before + @BeforeEach public void setUp() throws IOException { conf = new Configuration(); conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java index 85281451d3b9e..d9bc28e2d5227 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java @@ -20,11 +20,11 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CORRUPT_BLOCK_DELETE_IMMEDIATELY_ENABLED; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.mock; @@ -118,14 +118,13 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils.LogCapturer; import org.apache.hadoop.util.ToolRunner; -import org.junit.After; -import org.junit.AfterClass; -import 
org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.api.io.TempDir; import org.slf4j.LoggerFactory; /** @@ -178,30 +177,31 @@ public static String runFsck(Configuration conf, int expectedErrCode, return bStream.toString(); } - @Rule - public TemporaryFolder baseDir = new TemporaryFolder(); + @SuppressWarnings("checkstyle:VisibilityModifier") + @TempDir + java.nio.file.Path baseDir; private MiniDFSCluster cluster = null; private Configuration conf = null; - @BeforeClass + @BeforeAll public static void beforeClass() { auditLogCapture = LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG); } - @AfterClass + @AfterAll public static void afterClass() { auditLogCapture.stopCapturing(); } - @Before + @BeforeEach public void setUp() throws Exception { conf = new Configuration(); conf.setBoolean(DFS_NAMENODE_CORRUPT_BLOCK_DELETE_IMMEDIATELY_ENABLED, false); } - @After + @AfterEach public void tearDown() throws Exception { shutdownCluster(); } @@ -222,7 +222,7 @@ public void testFsck() throws Exception { conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, precision); conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L); - cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()) + cluster = new MiniDFSCluster.Builder(conf, baseDir.toFile()) .numDataNodes(4).build(); fs = cluster.getFileSystem(); final String fileName = "/srcdat"; @@ -239,7 +239,7 @@ public void testFsck() throws Exception { shutdownCluster(); // restart the cluster; bring up namenode but not the data nodes - cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()) + cluster = new MiniDFSCluster.Builder(conf, baseDir.toFile()) .numDataNodes(0).format(false).build(); outStr = runFsck(conf, 1, true, "/"); // expect the result is corrupt @@ -286,7 +286,7 @@ public void testFsckNonExistent() throws Exception { setNumFiles(20).build(); FileSystem fs = null; conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L); - cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()) + cluster = new MiniDFSCluster.Builder(conf, baseDir.toFile()) .numDataNodes(4).build(); fs = cluster.getFileSystem(); util.createFiles(fs, "/srcdat"); @@ -305,7 +305,7 @@ public void testFsckPermission() throws Exception { conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L); // Create a cluster with the current user, write some files - cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()) + cluster = new MiniDFSCluster.Builder(conf, baseDir.toFile()) .numDataNodes(4).build(); final MiniDFSCluster c2 = cluster; final String dir = "/dfsck"; @@ -352,7 +352,7 @@ public void testFsckMove() throws Exception { DFSTestUtil util = new DFSTestUtil("TestFsck", 5, 3, (5 * dfsBlockSize) + (dfsBlockSize - 1), 5 * dfsBlockSize); FileSystem fs = null; - cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()) + cluster = new MiniDFSCluster.Builder(conf, baseDir.toFile()) .numDataNodes(numDatanodes).build(); String topDir = "/srcdat"; fs = cluster.getFileSystem(); @@ -559,7 +559,7 @@ public void testFsckMoveAndDelete() throws Exception { FileSystem fs = null; conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L); 
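The TestFsck changes above replace the JUnit 4 TemporaryFolder rule with JUnit 5's @TempDir injection; the framework creates the directory before each test and deletes it afterwards, and baseDir.toFile() bridges to builders that still expect a java.io.File. A minimal sketch of the pattern (the builder call is only indicative):

import java.io.File;
import java.nio.file.Path;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;

class TempDirSketch {
  @TempDir
  Path baseDir;                     // injected per test, cleaned up automatically

  @Test
  void buildsClusterInTempDir() {
    File asFile = baseDir.toFile(); // for APIs that still take java.io.File
    // e.g. new MiniDFSCluster.Builder(conf, asFile)... as in the hunks above
  }
}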
conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1); - cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()) + cluster = new MiniDFSCluster.Builder(conf, baseDir.toFile()) .numDataNodes(4).build(); String topDir = "/srcdat"; fs = cluster.getFileSystem(); @@ -624,7 +624,7 @@ public void testFsckOpenFiles() throws Exception { setNumFiles(4).build(); FileSystem fs = null; conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L); - cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()) + cluster = new MiniDFSCluster.Builder(conf, baseDir.toFile()) .numDataNodes(4).build(); String topDir = "/srcdat"; String randomString = "HADOOP "; @@ -678,7 +678,7 @@ public void testFsckOpenECFiles() throws Exception { final int numAllUnits = dataBlocks + ecPolicy.getNumParityUnits(); int blockSize = 2 * cellSize; conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); - cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()).numDataNodes( + cluster = new MiniDFSCluster.Builder(conf, baseDir.toFile()).numDataNodes( numAllUnits + 1).build(); String topDir = "/myDir"; cluster.waitActive(); @@ -769,7 +769,7 @@ public void testCorruptBlock() throws Exception { String outStr = null; short factor = 1; - cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()) + cluster = new MiniDFSCluster.Builder(conf, baseDir.toFile()) .numDataNodes(1).build(); cluster.waitActive(); fs = cluster.getFileSystem(); @@ -841,7 +841,7 @@ public void testUnderMinReplicatedBlock() throws Exception { Random random = new Random(); String outStr = null; short factor = 1; - cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()) + cluster = new MiniDFSCluster.Builder(conf, baseDir.toFile()) .numDataNodes(2).build(); cluster.waitActive(); fs = cluster.getFileSystem(); @@ -900,7 +900,9 @@ public void testUnderMinReplicatedBlock() throws Exception { assertTrue(outStr.contains("MINIMAL BLOCK REPLICATION:\t2")); } - @Test(timeout = 90000) + @Test + @Timeout(value = 90) + @SuppressWarnings("checkstyle:methodlength") public void testFsckReplicaDetails() throws Exception { final short replFactor = 1; @@ -914,7 +916,7 @@ public void testFsckReplicaDetails() throws Exception { conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1); DistributedFileSystem dfs; - cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()) + cluster = new MiniDFSCluster.Builder(conf, baseDir.toFile()) .numDataNodes(numDn).hosts(hosts).racks(racks).build(); cluster.waitClusterUp(); dfs = cluster.getFileSystem(); @@ -1060,7 +1062,7 @@ public Boolean get() { @Test public void testFsckError() throws Exception { // bring up a one-node cluster - cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()).build(); + cluster = new MiniDFSCluster.Builder(conf, baseDir.toFile()).build(); String fileName = "/test.txt"; Path filePath = new Path(fileName); FileSystem fs = cluster.getFileSystem(); @@ -1092,7 +1094,7 @@ public void testFsckListCorruptFilesBlocks() throws Exception { conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1); FileSystem fs = null; - cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()).build(); + cluster = new MiniDFSCluster.Builder(conf, baseDir.toFile()).build(); cluster.waitActive(); fs = cluster.getFileSystem(); DFSTestUtil util = new DFSTestUtil.Builder(). 
@@ -1118,8 +1120,8 @@ public void testFsckListCorruptFilesBlocks() throws Exception { } for (File metadataFile : metadataFiles) { File blockFile = Block.metaToBlockFile(metadataFile); - assertTrue("Cannot remove file.", blockFile.delete()); - assertTrue("Cannot remove file.", metadataFile.delete()); + assertTrue(blockFile.delete(), "Cannot remove file."); + assertTrue(metadataFile.delete(), "Cannot remove file."); } } } @@ -1151,7 +1153,7 @@ public void testFsckListCorruptFilesBlocks() throws Exception { @Test public void testToCheckTheFsckCommandOnIllegalArguments() throws Exception { // bring up a one-node cluster - cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()).build(); + cluster = new MiniDFSCluster.Builder(conf, baseDir.toFile()).build(); String fileName = "/test.txt"; Path filePath = new Path(fileName); FileSystem fs = cluster.getFileSystem(); @@ -1195,12 +1197,12 @@ public void testFsckMissingReplicas() throws IOException { DistributedFileSystem dfs = null; // Startup a minicluster - cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()) + cluster = new MiniDFSCluster.Builder(conf, baseDir.toFile()) .numDataNodes(numReplicas).build(); - assertNotNull("Failed Cluster Creation", cluster); + assertNotNull(cluster, "Failed Cluster Creation"); cluster.waitClusterUp(); dfs = cluster.getFileSystem(); - assertNotNull("Failed to get FileSystem", dfs); + assertNotNull(dfs, "Failed to get FileSystem"); // Create a file that will be intentionally under-replicated final String pathString = "/testfile"; @@ -1228,8 +1230,7 @@ public void testFsckMissingReplicas() throws IOException { fsck.check(pathString, file, replRes, ecRes); // Also print the output from the fsck, for ex post facto sanity checks System.out.println(result.toString()); - assertEquals(replRes.missingReplicas, - (numBlocks*replFactor) - (numBlocks*numReplicas)); + assertEquals(replRes.missingReplicas, (numBlocks * replFactor) - (numBlocks * numReplicas)); assertEquals(replRes.numExpectedReplicas, numBlocks*replFactor); } @@ -1256,12 +1257,12 @@ public void testFsckMisPlacedReplicas() throws IOException { DistributedFileSystem dfs = null; // Startup a minicluster - cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()) + cluster = new MiniDFSCluster.Builder(conf, baseDir.toFile()) .numDataNodes(numDn).hosts(hosts).racks(racks).build(); - assertNotNull("Failed Cluster Creation", cluster); + assertNotNull(cluster, "Failed Cluster Creation"); cluster.waitClusterUp(); dfs = cluster.getFileSystem(); - assertNotNull("Failed to get FileSystem", dfs); + assertNotNull(dfs, "Failed to get FileSystem"); // Create a file that will be intentionally under-replicated final String pathString = "/testfile"; @@ -1364,7 +1365,7 @@ public void testFsckSymlink() throws Exception { conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, precision); conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L); - cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()) + cluster = new MiniDFSCluster.Builder(conf, baseDir.toFile()) .numDataNodes(4).build(); fs = cluster.getFileSystem(); final String fileName = "/srcdat"; @@ -1391,7 +1392,7 @@ public void testFsckSymlink() throws Exception { */ @Test public void testFsckForSnapshotFiles() throws Exception { - cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()).numDataNodes(1) + cluster = new MiniDFSCluster.Builder(conf, baseDir.toFile()).numDataNodes(1) .build(); String runFsck = runFsck(conf, 0, true, "/", "-includeSnapshots", "-files"); @@ 
-1426,13 +1427,13 @@ public void testBlockIdCK() throws Exception { conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 2); DistributedFileSystem dfs = null; - cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()) + cluster = new MiniDFSCluster.Builder(conf, baseDir.toFile()) .numDataNodes(numDn).hosts(hosts).racks(racks).build(); - assertNotNull("Failed Cluster Creation", cluster); + assertNotNull(cluster, "Failed Cluster Creation"); cluster.waitClusterUp(); dfs = cluster.getFileSystem(); - assertNotNull("Failed to get FileSystem", dfs); + assertNotNull(dfs, "Failed to get FileSystem"); DFSTestUtil util = new DFSTestUtil.Builder(). setName(getClass().getSimpleName()).setNumFiles(1).build(); @@ -1480,13 +1481,13 @@ public void testBlockIdCKDecommission() throws Exception { conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 2); DistributedFileSystem dfs; - cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()) + cluster = new MiniDFSCluster.Builder(conf, baseDir.toFile()) .numDataNodes(numDn).hosts(hosts).racks(racks).build(); - assertNotNull("Failed Cluster Creation", cluster); + assertNotNull(cluster, "Failed Cluster Creation"); cluster.waitClusterUp(); dfs = cluster.getFileSystem(); - assertNotNull("Failed to get FileSystem", dfs); + assertNotNull(dfs, "Failed to get FileSystem"); DFSTestUtil util = new DFSTestUtil.Builder(). setName(getClass().getSimpleName()).setNumFiles(1).build(); @@ -1549,7 +1550,8 @@ public void testBlockIdCKDecommission() throws Exception { /** * Test for blockIdCK with datanode maintenance. */ - @Test (timeout = 90000) + @Test + @Timeout(value = 90) public void testBlockIdCKMaintenance() throws Exception { final short replFactor = 2; short numDn = 2; @@ -1564,16 +1566,16 @@ public void testBlockIdCKMaintenance() throws Exception { replFactor); DistributedFileSystem dfs; - cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()) + cluster = new MiniDFSCluster.Builder(conf, baseDir.toFile()) .numDataNodes(numDn) .hosts(hosts) .racks(racks) .build(); - assertNotNull("Failed Cluster Creation", cluster); + assertNotNull(cluster, "Failed Cluster Creation"); cluster.waitClusterUp(); dfs = cluster.getFileSystem(); - assertNotNull("Failed to get FileSystem", dfs); + assertNotNull(dfs, "Failed to get FileSystem"); DFSTestUtil util = new DFSTestUtil.Builder(). setName(getClass().getSimpleName()).setNumFiles(1).build(); @@ -1685,10 +1687,10 @@ public void testBlockIdCKStaleness() throws Exception { File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath()); cluster = new MiniDFSCluster.Builder(configuration, builderBaseDir) .hosts(hosts).racks(racks).build(); - assertNotNull("Failed Cluster Creation", cluster); + assertNotNull(cluster, "Failed Cluster Creation"); cluster.waitClusterUp(); DistributedFileSystem fs = cluster.getFileSystem(); - assertNotNull("Failed to get FileSystem", fs); + assertNotNull(fs, "Failed to get FileSystem"); try { DFSTestUtil util = new DFSTestUtil.Builder(). 
@@ -1770,13 +1772,13 @@ public void testBlockIdCKCorruption() throws Exception { conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1); DistributedFileSystem dfs = null; - cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()) + cluster = new MiniDFSCluster.Builder(conf, baseDir.toFile()) .numDataNodes(numDn).hosts(hosts).racks(racks).build(); - assertNotNull("Failed Cluster Creation", cluster); + assertNotNull(cluster, "Failed Cluster Creation"); cluster.waitClusterUp(); dfs = cluster.getFileSystem(); - assertNotNull("Failed to get FileSystem", dfs); + assertNotNull(dfs, "Failed to get FileSystem"); DFSTestUtil util = new DFSTestUtil.Builder(). setName(getClass().getSimpleName()).setNumFiles(1).build(); @@ -1883,7 +1885,7 @@ private void writeFile(final DistributedFileSystem dfs, */ @Test public void testStoragePoliciesCK() throws Exception { - cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()) + cluster = new MiniDFSCluster.Builder(conf, baseDir.toFile()) .numDataNodes(3) .storageTypes( new StorageType[] {StorageType.DISK, StorageType.ARCHIVE}) @@ -1926,13 +1928,13 @@ public void testFsckWithDecommissionedReplicas() throws Exception { conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1); DistributedFileSystem dfs; - cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()) + cluster = new MiniDFSCluster.Builder(conf, baseDir.toFile()) .numDataNodes(numDn).hosts(hosts).racks(racks).build(); - assertNotNull("Failed Cluster Creation", cluster); + assertNotNull(cluster, "Failed Cluster Creation"); cluster.waitClusterUp(); dfs = cluster.getFileSystem(); - assertNotNull("Failed to get FileSystem", dfs); + assertNotNull(dfs, "Failed to get FileSystem"); DFSTestUtil util = new DFSTestUtil.Builder(). setName(getClass().getSimpleName()).setNumFiles(1).build(); @@ -1992,7 +1994,8 @@ public void testFsckWithDecommissionedReplicas() throws Exception { /** * Test for blocks on maintenance hosts are not shown as missing. */ - @Test (timeout = 90000) + @Test + @Timeout(value = 90) public void testFsckWithMaintenanceReplicas() throws Exception { final short replFactor = 2; short numDn = 2; @@ -2007,16 +2010,16 @@ public void testFsckWithMaintenanceReplicas() throws Exception { replFactor); DistributedFileSystem dfs; - cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()) + cluster = new MiniDFSCluster.Builder(conf, baseDir.toFile()) .numDataNodes(numDn) .hosts(hosts) .racks(racks) .build(); - assertNotNull("Failed Cluster Creation", cluster); + assertNotNull(cluster, "Failed Cluster Creation"); cluster.waitClusterUp(); dfs = cluster.getFileSystem(); - assertNotNull("Failed to get FileSystem", dfs); + assertNotNull(dfs, "Failed to get FileSystem"); DFSTestUtil util = new DFSTestUtil.Builder(). 
setName(getClass().getSimpleName()).setNumFiles(1).build(); @@ -2115,7 +2118,7 @@ public void testECFsck() throws Exception { int parityBlocks = StripedFileTestUtil.getDefaultECPolicy().getNumParityUnits(); int totalSize = dataBlocks + parityBlocks; - cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()) + cluster = new MiniDFSCluster.Builder(conf, baseDir.toFile()) .numDataNodes(totalSize).build(); fs = cluster.getFileSystem(); fs.enableErasureCodingPolicy( @@ -2150,7 +2153,7 @@ public void testECFsck() throws Exception { shutdownCluster(); // restart the cluster; bring up namenode but not the data nodes - cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()) + cluster = new MiniDFSCluster.Builder(conf, baseDir.toFile()) .numDataNodes(0).format(false).build(); outStr = runFsck(conf, 1, true, "/", "-files", "-blocks"); // expect the result is corrupt @@ -2182,7 +2185,7 @@ public void testFsckListCorruptSnapshotFiles() throws Exception { int numFiles = 3; int numSnapshots = 0; - cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()).build(); + cluster = new MiniDFSCluster.Builder(conf, baseDir.toFile()).build(); cluster.waitActive(); hdfs = cluster.getFileSystem(); DFSTestUtil util = new DFSTestUtil.Builder(). @@ -2216,8 +2219,8 @@ public void testFsckListCorruptSnapshotFiles() throws Exception { } for (File metadataFile : metadataFiles) { File blockFile = Block.metaToBlockFile(metadataFile); - assertTrue("Cannot remove file.", blockFile.delete()); - assertTrue("Cannot remove file.", metadataFile.delete()); + assertTrue(blockFile.delete(), "Cannot remove file."); + assertTrue(metadataFile.delete(), "Cannot remove file."); } } } @@ -2267,7 +2270,8 @@ private void waitForCorruptionBlocks(int corruptBlocks, String path) }, 100, 10000); } - @Test (timeout = 300000) + @Test + @Timeout(value = 300) public void testFsckMoveAfterCorruption() throws Exception { final int dfsBlockSize = 512 * 1024; final int numDatanodes = 1; @@ -2276,7 +2280,7 @@ public void testFsckMoveAfterCorruption() throws Exception { conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L); conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1); conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, replication); - cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()).build(); + cluster = new MiniDFSCluster.Builder(conf, baseDir.toFile()).build(); DistributedFileSystem dfs = cluster.getFileSystem(); cluster.waitActive(); @@ -2315,7 +2319,7 @@ public Boolean get() { } } if (numCorrupt == null) { - Assert.fail("Cannot find corrupt blocks count in fsck output."); + fail("Cannot find corrupt blocks count in fsck output."); } if (Integer.parseInt(numCorrupt) == ctf.getTotalMissingBlocks()) { assertTrue(str.contains(NamenodeFsck.CORRUPT_STATUS)); @@ -2323,7 +2327,7 @@ public Boolean get() { } } catch (Exception e) { LOG.error("Exception caught", e); - Assert.fail("Caught unexpected exception."); + fail("Caught unexpected exception."); } return false; } @@ -2347,11 +2351,12 @@ public Boolean get() { for (LocatedFileStatus lfs: retVal) { totalLength += lfs.getLen(); } - Assert.assertTrue("Nothing is moved to lost+found!", totalLength > 0); + assertTrue(totalLength > 0, "Nothing is moved to lost+found!"); util.cleanup(dfs, srcDir); } - @Test(timeout = 60000) + @Test + @Timeout(value = 60) public void testFsckUpgradeDomain() throws Exception { testUpgradeDomain(false, false); testUpgradeDomain(false, true); @@ -2378,7 +2383,7 @@ private void testUpgradeDomain(boolean defineUpgradeDomain, 
} DistributedFileSystem dfs; - cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()).numDataNodes(numDN). + cluster = new MiniDFSCluster.Builder(conf, baseDir.toFile()).numDataNodes(numDN). hosts(hosts).racks(racks).build(); cluster.waitClusterUp(); dfs = cluster.getFileSystem(); @@ -2405,8 +2410,7 @@ private void testUpgradeDomain(boolean defineUpgradeDomain, assertTrue(fsckOut.contains(NamenodeFsck.HEALTHY_STATUS)); String udValue = defineUpgradeDomain ? upgradeDomain : NamenodeFsck.UNDEFINED; - assertEquals(displayUpgradeDomain, - fsckOut.contains("(ud=" + udValue + ")")); + assertEquals(displayUpgradeDomain, fsckOut.contains("(ud=" + udValue + ")")); } finally { if (defineUpgradeDomain) { hostsFileWriter.cleanup(); @@ -2414,7 +2418,8 @@ private void testUpgradeDomain(boolean defineUpgradeDomain, } } - @Test (timeout = 300000) + @Test + @Timeout(value = 300) public void testFsckCorruptECFile() throws Exception { DistributedFileSystem fs = null; int dataBlocks = StripedFileTestUtil.getDefaultECPolicy().getNumDataUnits(); @@ -2422,7 +2427,7 @@ public void testFsckCorruptECFile() throws Exception { StripedFileTestUtil.getDefaultECPolicy().getNumParityUnits(); int cellSize = StripedFileTestUtil.getDefaultECPolicy().getCellSize(); int totalSize = dataBlocks + parityBlocks; - cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()) + cluster = new MiniDFSCluster.Builder(conf, baseDir.toFile()) .numDataNodes(totalSize).build(); fs = cluster.getFileSystem(); fs.enableErasureCodingPolicy( @@ -2455,7 +2460,7 @@ public void testFsckCorruptECFile() throws Exception { File storageDir = cluster.getInstanceStorageDir(dnIndex, 0); File blkFile = MiniDFSCluster.getBlockFile(storageDir, blks[i].getBlock()); - Assert.assertTrue("Block file does not exist", blkFile.exists()); + assertTrue(blkFile.exists(), "Block file does not exist"); FileOutputStream out = new FileOutputStream(blkFile); out.write("corruption".getBytes()); @@ -2485,7 +2490,8 @@ public void testFsckCorruptECFile() throws Exception { assertTrue(outStr.contains("has 1 CORRUPT blocks")); } - @Test (timeout = 300000) + @Test + @Timeout(value = 300) public void testFsckMissingECFile() throws Exception { DistributedFileSystem fs = null; int dataBlocks = StripedFileTestUtil.getDefaultECPolicy().getNumDataUnits(); @@ -2493,7 +2499,7 @@ public void testFsckMissingECFile() throws Exception { StripedFileTestUtil.getDefaultECPolicy().getNumParityUnits(); int cellSize = StripedFileTestUtil.getDefaultECPolicy().getCellSize(); int totalSize = dataBlocks + parityBlocks; - cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()) + cluster = new MiniDFSCluster.Builder(conf, baseDir.toFile()) .numDataNodes(totalSize).build(); fs = cluster.getFileSystem(); fs.enableErasureCodingPolicy( @@ -2553,7 +2559,7 @@ public void testFsckECBlockIdRedundantInternalBlocks() throws Exception { DFSTestUtil.createFile(fs, filePath, cellSize * dataBlocks * 2, (short) 1, 0L); LocatedBlocks blks = fs.getClient().getLocatedBlocks(filePath.toString(), 0); LocatedStripedBlock block = (LocatedStripedBlock) blks.getLastLocatedBlock(); - Assert.assertEquals(groupSize, block.getLocations().length); + assertEquals(groupSize, block.getLocations().length); //general test. 
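Several of the hunks above convert @Test(timeout = ...) to the separate @Timeout annotation, since the JUnit 5 @Test has no timeout attribute. The unit changes as well: the JUnit 4 attribute was milliseconds, while @Timeout defaults to seconds, so 90000 becomes value = 90. A minimal sketch:

import java.util.concurrent.TimeUnit;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;

class TimeoutSketch {
  @Test
  @Timeout(value = 90)                          // interpreted as 90 seconds
  void boundedTest() { /* ... */ }

  @Test
  @Timeout(value = 90, unit = TimeUnit.SECONDS) // unit spelled out explicitly
  void boundedTestWithExplicitUnit() { /* ... */ }
}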
String runFsckResult = runFsck(conf, 0, true, "/", @@ -2577,7 +2583,7 @@ public void testFsckECBlockIdRedundantInternalBlocks() throws Exception { blks = fs.getClient().getLocatedBlocks(filePath.toString(), 0); block = (LocatedStripedBlock) blks.getLastLocatedBlock(); - Assert.assertEquals(groupSize + 1, block.getLocations().length); + assertEquals(groupSize + 1, block.getLocations().length); //general test, number of redundant internal block replicas. runFsckResult = runFsck(conf, 0, true, "/", @@ -2606,17 +2612,18 @@ public Boolean get() { } } catch (Exception e) { LOG.error("Exception caught", e); - Assert.fail("Caught unexpected exception."); + fail("Caught unexpected exception."); } return false; } }, 1000, 60000); } - @Test(timeout = 300000) + @Test + @Timeout(value = 300) public void testFsckCorruptWhenOneReplicaIsCorrupt() throws Exception { - try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()) + try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, baseDir.toFile()) .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(2) .build()) { cluster.waitActive(); @@ -2626,7 +2633,7 @@ public void testFsckCorruptWhenOneReplicaIsCorrupt() Path fileName = new Path(filePath); DFSTestUtil.createFile(fs, fileName, 512, (short) 2, 0); DFSTestUtil.waitReplication(fs, fileName, (short) 2); - Assert.assertTrue("File not created", fs.exists(fileName)); + assertTrue(fs.exists(fileName), "File not created"); cluster.getDataNodes().get(1).shutdown(); DFSTestUtil.appendFile(fs, fileName, "appendCorruptBlock"); cluster.restartDataNode(1, true); @@ -2645,7 +2652,7 @@ public void testFsckCorruptWhenOneReplicaIsCorrupt() @Test public void testFsckNonPrivilegedListCorrupt() throws Exception { - cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()).numDataNodes(4).build(); + cluster = new MiniDFSCluster.Builder(conf, baseDir.toFile()).numDataNodes(4).build(); UserGroupInformation ugi = UserGroupInformation.createUserForTesting("systest", new String[]{""}); ugi.doAs(new PrivilegedExceptionAction() { @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsckWithMultipleNameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsckWithMultipleNameNodes.java index 5870fbb549884..c158fa5caccab 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsckWithMultipleNameNodes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsckWithMultipleNameNodes.java @@ -35,10 +35,11 @@ import org.apache.hadoop.hdfs.MiniDFSNNTopology; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.server.balancer.TestBalancer; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.slf4j.event.Level; +import static org.junit.jupiter.api.Assertions.assertTrue; + /** * Test fsck with multiple NameNodes */ @@ -120,7 +121,7 @@ private void runTest(final int nNameNodes, final int nDataNodes, LOG.info("urls[" + i + "]=" + urls[i]); final String result = TestFsck.runFsck(conf, 0, false, urls[i]); LOG.info("result=" + result); - Assert.assertTrue(result.contains("Status: HEALTHY")); + assertTrue(result.contains("Status: HEALTHY")); } // Test viewfs @@ -137,7 +138,7 @@ private void runTest(final int nNameNodes, final int nDataNodes, LOG.info("vurls[" + i + "]=" + vurls[i]); final String result = TestFsck.runFsck(conf, 0, 
false, vurls[i]); LOG.info("result=" + result); - Assert.assertTrue(result.contains("Status: HEALTHY")); + assertTrue(result.contains("Status: HEALTHY")); } } finally { cluster.shutdown(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetBlockLocations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetBlockLocations.java index 0f3e36a99a649..11c567bc43865 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetBlockLocations.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetBlockLocations.java @@ -24,7 +24,8 @@ import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp; import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory; import org.apache.hadoop.hdfs.util.RwLockMode; -import org.junit.Test; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; @@ -52,7 +53,8 @@ public class TestGetBlockLocations { private static final String RESERVED_PATH = "/.reserved/.inodes/" + MOCK_INODE_ID; - @Test(timeout = 30000) + @Test + @Timeout(value = 30) public void testResolveReservedPath() throws IOException { FSNamesystem fsn = setupFileSystem(); FSEditLog editlog = fsn.getEditLog(); @@ -61,7 +63,8 @@ public void testResolveReservedPath() throws IOException { fsn.close(); } - @Test(timeout = 30000) + @Test + @Timeout(value = 30) public void testGetBlockLocationsRacingWithDelete() throws IOException { FSNamesystem fsn = spy(setupFileSystem()); final FSDirectory fsd = fsn.getFSDirectory(); @@ -95,7 +98,8 @@ public Void answer(InvocationOnMock invocation) throws Throwable { fsn.close(); } - @Test(timeout = 30000) + @Test + @Timeout(value = 30) public void testGetBlockLocationsRacingWithRename() throws IOException { FSNamesystem fsn = spy(setupFileSystem()); final FSDirectory fsd = fsn.getFSDirectory(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetContentSummaryWithPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetContentSummaryWithPermission.java index 03aa440a53460..cb8af03e5e555 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetContentSummaryWithPermission.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetContentSummaryWithPermission.java @@ -28,16 +28,16 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import java.security.PrivilegedExceptionAction; import static org.apache.hadoop.fs.permission.FsAction.READ_EXECUTE; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; /** * This class tests get content summary with permission settings. 
@@ -50,7 +50,7 @@ public class TestGetContentSummaryWithPermission { private MiniDFSCluster cluster; private DistributedFileSystem dfs; - @Before + @BeforeEach public void setUp() throws Exception { conf = new Configuration(); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE); @@ -61,7 +61,7 @@ public void setUp() throws Exception { dfs = cluster.getFileSystem(); } - @After + @AfterEach public void tearDown() throws Exception { if (cluster != null) { cluster.shutdown(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java index c78f4e0fbe180..fd954c47dfe8e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; @@ -32,7 +32,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authentication.util.KerberosName; import org.apache.hadoop.security.authorize.AccessControlList; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.mockito.Mockito; public class TestGetImageServlet { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java index f2e9ec278ee08..f4dbcb7a6ed40 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java @@ -18,12 +18,12 @@ package org.apache.hadoop.hdfs.server.namenode; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import java.io.IOException; @@ -48,10 +48,10 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.LambdaTestUtils; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; public class TestHDFSConcat { public static final Logger LOG = @@ -73,18 +73,18 @@ public class TestHDFSConcat { conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); } - @Before + @BeforeEach public void startUpCluster() throws IOException { cluster = new 
MiniDFSCluster.Builder(conf).numDataNodes(REPL_FACTOR).build(); - assertNotNull("Failed Cluster Creation", cluster); + assertNotNull(cluster, "Failed Cluster Creation"); cluster.waitClusterUp(); dfs = cluster.getFileSystem(); - assertNotNull("Failed to get FileSystem", dfs); + assertNotNull(dfs, "Failed to get FileSystem"); nn = cluster.getNameNodeRpc(); - assertNotNull("Failed to get NameNode", nn); + assertNotNull(nn, "Failed to get NameNode"); } - @After + @AfterEach public void shutDownCluster() throws IOException { if(dfs != null) { dfs.close(); @@ -203,7 +203,7 @@ public void testConcat() throws IOException, InterruptedException { //verifications // 1. number of blocks - assertEquals(trgBlocks, totalBlocks); + assertEquals(trgBlocks, totalBlocks); // 2. file lengths assertEquals(trgLen, totalLen); @@ -211,7 +211,7 @@ public void testConcat() throws IOException, InterruptedException { // 3. removal of the src file for(Path p: files) { fStatus = nn.getFileInfo(p.toUri().getPath()); - assertNull("File " + p + " still exists", fStatus); // file shouldn't exist + assertNull(fStatus, "File " + p + " still exists"); // file shouldn't exist // try to create fie with the same name DFSTestUtil.createFile(dfs, p, fileLen, REPL_FACTOR, 1); } @@ -292,7 +292,7 @@ private void checkFileContent(byte[] concat, byte[][] bytes ) { if(mismatch) break; } - assertFalse("File content of concatenated file is different", mismatch); + assertFalse(mismatch, "File content of concatenated file is different"); } // test case when final block is not of a full length @@ -362,7 +362,7 @@ public void testConcatNotCompleteBlock() throws IOException { // 3. removal of the src file fStatus = nn.getFileInfo(name2); - assertNull("File "+name2+ "still exists", fStatus); // file shouldn't exist + assertNull(fStatus, "File " + name2 + "still exists"); // file shouldn't exist // 4. 
content checkFileContent(byteFileConcat, new byte [] [] {byteFile1, byteFile2}); @@ -447,15 +447,14 @@ public void testConcatWithQuotaDecrease() throws IOException { } ContentSummary summary = dfs.getContentSummary(foo); - Assert.assertEquals(11, summary.getFileCount()); - Assert.assertEquals(blockSize * REPL_FACTOR + - blockSize * 2 * srcRepl * srcNum, summary.getSpaceConsumed()); + assertEquals(11, summary.getFileCount()); + assertEquals(blockSize * REPL_FACTOR + blockSize * 2 * srcRepl * srcNum, + summary.getSpaceConsumed()); dfs.concat(target, srcs); summary = dfs.getContentSummary(foo); - Assert.assertEquals(1, summary.getFileCount()); - Assert.assertEquals( - blockSize * REPL_FACTOR + blockSize * 2 * REPL_FACTOR * srcNum, + assertEquals(1, summary.getFileCount()); + assertEquals(blockSize * REPL_FACTOR + blockSize * 2 * REPL_FACTOR * srcNum, summary.getSpaceConsumed()); } @@ -478,23 +477,22 @@ public void testConcatWithQuotaIncrease() throws IOException { } ContentSummary summary = dfs.getContentSummary(bar); - Assert.assertEquals(11, summary.getFileCount()); - Assert.assertEquals(dsQuota, summary.getSpaceConsumed()); + assertEquals(11, summary.getFileCount()); + assertEquals(dsQuota, summary.getSpaceConsumed()); try { dfs.concat(target, srcs); fail("QuotaExceededException expected"); } catch (RemoteException e) { - Assert.assertTrue( + assertTrue( e.unwrapRemoteException() instanceof QuotaExceededException); } dfs.setQuota(foo, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1); dfs.concat(target, srcs); summary = dfs.getContentSummary(bar); - Assert.assertEquals(1, summary.getFileCount()); - Assert.assertEquals(blockSize * repl * (srcNum + 1), - summary.getSpaceConsumed()); + assertEquals(1, summary.getFileCount()); + assertEquals(blockSize * repl * (srcNum + 1), summary.getSpaceConsumed()); } @Test @@ -510,7 +508,8 @@ public void testConcatRelativeTargetPath() throws IOException { assertFalse(dfs.exists(src)); } - @Test(timeout = 30000) + @Test + @Timeout(value = 30) public void testConcatReservedRelativePaths() throws IOException { String testPathDir = "/.reserved/raw/ezone"; Path dir = new Path(testPathDir); @@ -521,7 +520,7 @@ public void testConcatReservedRelativePaths() throws IOException { DFSTestUtil.createFile(dfs, src, blockSize, REPL_FACTOR, 1); try { dfs.concat(trg, new Path[] { src }); - Assert.fail("Must throw Exception!"); + fail("Must throw Exception!"); } catch (IOException e) { String errMsg = "Concat operation doesn't support " + FSDirectory.DOT_RESERVED_STRING + " relative path : " + trg; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java index 78ebcb4e2dfd6..115d93095cf12 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java @@ -17,12 +17,14 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.lang.management.ManagementFactory; import java.util.Arrays; +import org.junit.jupiter.params.ParameterizedClass; +import org.junit.jupiter.params.provider.MethodSource; import org.slf4j.Logger; import 
org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -38,9 +40,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.HostConfigManager; import org.apache.hadoop.hdfs.server.blockmanagement.HostFileManager; import org.apache.hadoop.hdfs.util.HostsFileWriter; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.Test; import javax.management.MBeanServer; import javax.management.ObjectName; @@ -49,7 +49,8 @@ * DFS_HOSTS and DFS_HOSTS_EXCLUDE tests * */ -@RunWith(Parameterized.class) +@MethodSource("data") +@ParameterizedClass public class TestHostsFiles { private static final Logger LOG = LoggerFactory.getLogger(TestHostsFiles.class.getName()); @@ -59,7 +60,6 @@ public TestHostsFiles(Class hostFileMgrClass) { this.hostFileMgrClass = hostFileMgrClass; } - @Parameterized.Parameters public static Iterable data() { return Arrays.asList(new Object[][]{ {HostFileManager.class}, {CombinedHostFileManager.class}}); @@ -136,8 +136,8 @@ public void testHostsExcludeInUI() throws Exception { ObjectName mxbeanName = new ObjectName( "Hadoop:service=NameNode,name=NameNodeInfo"); String nodes = (String) mbs.getAttribute(mxbeanName, "LiveNodes"); - assertTrue("Live nodes should contain the decommissioned node", - nodes.contains("Decommissioned")); + assertTrue(nodes.contains("Decommissioned"), + "Live nodes should contain the decommissioned node"); } finally { if (cluster != null) { cluster.shutdown(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java index 700b32f2895c6..92224a053b9b2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java @@ -41,14 +41,16 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Lists; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; public class TestINodeAttributeProvider { private static final Logger LOG = @@ -233,7 +235,7 @@ private boolean useNullAclFeature(String[] pathElements) { } } - @Before + @BeforeEach public void setUp() throws IOException { CALLED.clear(); Configuration conf = new HdfsConfiguration(); @@ -247,7 +249,7 @@ public void setUp() throws IOException { miniDFS = new MiniDFSCluster.Builder(conf).build(); } - @After + @AfterEach public void cleanUp() throws IOException { CALLED.clear(); if (miniDFS != null) { @@ -256,12 +258,12 @@ public void cleanUp() throws IOException { } runPermissionCheck = false; shouldThrowAccessException = false; - Assert.assertTrue(CALLED.contains("stop")); + assertTrue(CALLED.contains("stop")); } @Test public void testDelegationToProvider() throws Exception { - Assert.assertTrue(CALLED.contains("start")); + 
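TestHostsFiles swaps the JUnit 4 Parameterized runner for a class-level @ParameterizedClass with @MethodSource, as supported by recent JUnit Jupiter versions; each Object[] returned by the factory is passed to the existing constructor, and the @Parameterized.Parameters annotation on data() is dropped. A rough sketch with placeholder parameter values:

import java.util.Arrays;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedClass;
import org.junit.jupiter.params.provider.MethodSource;

@ParameterizedClass
@MethodSource("data")
class HostManagerParamSketch {
  private final Class<?> hostFileMgrClass;

  HostManagerParamSketch(Class<?> hostFileMgrClass) { // called once per parameter set
    this.hostFileMgrClass = hostFileMgrClass;
  }

  static Iterable<Object[]> data() {                  // placeholder values, not the real managers
    return Arrays.asList(new Object[][]{{String.class}, {Integer.class}});
  }

  @Test
  void runsOncePerParameterSet() {
    // hostFileMgrClass would be used to configure the cluster here
  }
}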
assertTrue(CALLED.contains("start")); FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0)); final Path tmpPath = new Path("/tmp"); final Path fooPath = new Path("/tmp/foo"); @@ -276,21 +278,21 @@ public Void run() throws Exception { FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0)); CALLED.clear(); fs.mkdirs(fooPath); - Assert.assertTrue(CALLED.contains("getAttributes")); - Assert.assertTrue(CALLED.contains("checkPermission|null|null|null")); - Assert.assertTrue(CALLED.contains("checkPermission|WRITE|null|null")); + assertTrue(CALLED.contains("getAttributes")); + assertTrue(CALLED.contains("checkPermission|null|null|null")); + assertTrue(CALLED.contains("checkPermission|WRITE|null|null")); CALLED.clear(); fs.listStatus(fooPath); - Assert.assertTrue(CALLED.contains("getAttributes")); - Assert.assertTrue( - CALLED.contains("checkPermission|null|null|READ_EXECUTE")); + assertTrue(CALLED.contains("getAttributes")); + assertTrue( + CALLED.contains("checkPermission|null|null|READ_EXECUTE")); CALLED.clear(); fs.getAclStatus(fooPath); - Assert.assertTrue(CALLED.contains("getAttributes")); - Assert.assertTrue(CALLED.contains("checkPermission|null|null|null")); - return null; + assertTrue(CALLED.contains("getAttributes")); + assertTrue(CALLED.contains("checkPermission|null|null|null")); + return null; } }); } @@ -302,9 +304,9 @@ private class AssertHelper { } public void doAssert(boolean x) { if (bypass) { - Assert.assertFalse(x); + assertFalse(x); } else { - Assert.assertTrue(x); + assertTrue(x); } } } @@ -313,7 +315,7 @@ private void testBypassProviderHelper(final String[] users, final short expectedPermission, final boolean bypass) throws Exception { final AssertHelper asserter = new AssertHelper(bypass); - Assert.assertTrue(CALLED.contains("start")); + assertTrue(CALLED.contains("start")); FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0)); final Path userPath = new Path("/user"); @@ -334,14 +336,14 @@ private void testBypassProviderHelper(final String[] users, @Override public Void run() throws Exception { FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0)); - Assert.assertEquals(expectedPermission, - fs.getFileStatus(authzChild).getPermission().toShort()); + assertEquals(expectedPermission, + fs.getFileStatus(authzChild).getPermission().toShort()); asserter.doAssert(CALLED.contains("getAttributes")); asserter.doAssert(CALLED.contains("checkPermission|null|null|null")); CALLED.clear(); - Assert.assertEquals(expectedPermission, - fs.listStatus(userPath)[0].getPermission().toShort()); + assertEquals(expectedPermission, + fs.listStatus(userPath)[0].getPermission().toShort()); asserter.doAssert(CALLED.contains("getAttributes")); asserter.doAssert( CALLED.contains("checkPermission|null|null|READ_EXECUTE")); @@ -380,28 +382,25 @@ private void verifyFileStatus(UserGroupInformation ugi) throws IOException { Path userDir = new Path("/user/" + ugi.getShortUserName()); fs.mkdirs(userDir); status = fs.getFileStatus(userDir); - Assert.assertEquals(ugi.getShortUserName(), status.getOwner()); - Assert.assertEquals("supergroup", status.getGroup()); - Assert.assertEquals(new FsPermission((short) 0755), status.getPermission()); + assertEquals(ugi.getShortUserName(), status.getOwner()); + assertEquals("supergroup", status.getGroup()); + assertEquals(new FsPermission((short) 0755), status.getPermission()); Path authzDir = new Path("/user/authz"); fs.mkdirs(authzDir); status = fs.getFileStatus(authzDir); - Assert.assertEquals("foo", status.getOwner()); - Assert.assertEquals("bar", 
status.getGroup()); - Assert.assertEquals(new FsPermission((short) 0770), status.getPermission()); + assertEquals("foo", status.getOwner()); + assertEquals("bar", status.getGroup()); + assertEquals(new FsPermission((short) 0770), status.getPermission()); AclStatus aclStatus = fs.getAclStatus(authzDir); - Assert.assertEquals(1, aclStatus.getEntries().size()); - Assert.assertEquals(AclEntryType.GROUP, - aclStatus.getEntries().get(0).getType()); - Assert.assertEquals("xxx", - aclStatus.getEntries().get(0).getName()); - Assert.assertEquals(FsAction.ALL, - aclStatus.getEntries().get(0).getPermission()); + assertEquals(1, aclStatus.getEntries().size()); + assertEquals(AclEntryType.GROUP, aclStatus.getEntries().get(0).getType()); + assertEquals("xxx", aclStatus.getEntries().get(0).getName()); + assertEquals(FsAction.ALL, aclStatus.getEntries().get(0).getPermission()); Map xAttrs = fs.getXAttrs(authzDir); - Assert.assertTrue(xAttrs.containsKey("user.test")); - Assert.assertEquals(2, xAttrs.get("user.test").length); + assertTrue(xAttrs.containsKey("user.test")); + assertEquals(2, xAttrs.get("user.test").length); } /** @@ -437,7 +436,7 @@ public void testAclFeature() throws Exception { Path aclChildDir = new Path(aclDir, "subdir"); fs.mkdirs(aclChildDir); AclStatus aclStatus = fs.getAclStatus(aclDir); - Assert.assertEquals(0, aclStatus.getEntries().size()); + assertEquals(0, aclStatus.getEntries().size()); return null; }); } @@ -463,13 +462,13 @@ public void testGetAclStatusReturnsProviderOwnerPerms() throws Exception { @Override public Void run() throws Exception { FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0)); - Assert.assertEquals(PROVIDER_PERMISSION, - fs.getFileStatus(authzChild).getPermission().toShort()); + assertEquals(PROVIDER_PERMISSION, + fs.getFileStatus(authzChild).getPermission().toShort()); - Assert.assertEquals("foo", fs.getAclStatus(authzChild).getOwner()); - Assert.assertEquals("bar", fs.getAclStatus(authzChild).getGroup()); - Assert.assertEquals(PROVIDER_PERMISSION, - fs.getAclStatus(authzChild).getPermission().toShort()); + assertEquals("foo", fs.getAclStatus(authzChild).getOwner()); + assertEquals("bar", fs.getAclStatus(authzChild).getGroup()); + assertEquals(PROVIDER_PERMISSION, + fs.getAclStatus(authzChild).getPermission().toShort()); return null; } }); @@ -506,7 +505,7 @@ public Void run() throws Exception { // is to ensure ACE is always thrown rather than a sub class to avoid // this issue. 
} catch (AccessControlException ace) { - Assert.assertEquals(AccessControlException.class, ace.getClass()); + assertEquals(AccessControlException.class, ace.getClass()); } return null; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java index 4f18baf1aeaa9..d6af609f822c8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java @@ -20,11 +20,12 @@ import static org.apache.hadoop.hdfs.protocol.BlockType.CONTIGUOUS; import static org.apache.hadoop.hdfs.protocol.BlockType.STRIPED; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertSame; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertSame; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import java.io.FileNotFoundException; import java.io.IOException; @@ -73,8 +74,8 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.util.Time; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import org.mockito.Mockito; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; @@ -129,14 +130,18 @@ public void testStoragePolicyID () { } } - @Test(expected=IllegalArgumentException.class) - public void testStoragePolicyIdBelowLowerBound () throws IllegalArgumentException { - createINodeFile((byte)-1); + @Test + public void testStoragePolicyIdBelowLowerBound() throws IllegalArgumentException { + assertThrows(IllegalArgumentException.class, () -> { + createINodeFile((byte) -1); + }); } - @Test(expected=IllegalArgumentException.class) - public void testStoragePolicyIdAboveUpperBound () throws IllegalArgumentException { - createINodeFile((byte)16); + @Test + public void testStoragePolicyIdAboveUpperBound() throws IllegalArgumentException { + assertThrows(IllegalArgumentException.class, () -> { + createINodeFile((byte) 16); + }); } @Test @@ -189,9 +194,8 @@ public void testContiguousLayoutRedundancy() { null, perm, 0L, 0L, null, replication, null /*ec policy*/, preferredBlockSize, HdfsConstants.WARM_STORAGE_POLICY_ID, CONTIGUOUS); - Assert.assertTrue(!inodeFile.isStriped()); - Assert.assertEquals(replication.shortValue(), - inodeFile.getFileReplication()); + assertTrue(!inodeFile.isStriped()); + assertEquals(replication.shortValue(), inodeFile.getFileReplication()); } /** @@ -203,8 +207,7 @@ public void testReplication () { replication = 3; preferredBlockSize = 128*1024*1024; INodeFile inf = createINodeFile(replication, preferredBlockSize); - assertEquals("True has to be returned in this case", replication, - inf.getFileReplication()); + assertEquals(replication, inf.getFileReplication(), "True has to be returned in this case"); } /** @@ -212,12 +215,14 @@ public void testReplication () { * for Replication. 
* @throws IllegalArgumentException as the result */ - @Test(expected=IllegalArgumentException.class) - public void testReplicationBelowLowerBound () - throws IllegalArgumentException { - replication = -1; - preferredBlockSize = 128*1024*1024; - createINodeFile(replication, preferredBlockSize); + @Test + public void testReplicationBelowLowerBound() + throws IllegalArgumentException { + assertThrows(IllegalArgumentException.class, () -> { + replication = -1; + preferredBlockSize = 128 * 1024 * 1024; + createINodeFile(replication, preferredBlockSize); + }); } /** @@ -229,8 +234,8 @@ public void testPreferredBlockSize () { replication = 3; preferredBlockSize = 128*1024*1024; INodeFile inf = createINodeFile(replication, preferredBlockSize); - assertEquals("True has to be returned in this case", preferredBlockSize, - inf.getPreferredBlockSize()); + assertEquals(preferredBlockSize, inf.getPreferredBlockSize(), + "True has to be returned in this case"); } @Test @@ -238,8 +243,8 @@ public void testPreferredBlockSizeUpperBound () { replication = 3; preferredBlockSize = BLKSIZE_MAXVALUE; INodeFile inf = createINodeFile(replication, preferredBlockSize); - assertEquals("True has to be returned in this case", BLKSIZE_MAXVALUE, - inf.getPreferredBlockSize()); + assertEquals(BLKSIZE_MAXVALUE, inf.getPreferredBlockSize(), + "True has to be returned in this case"); } /** @@ -247,26 +252,30 @@ public void testPreferredBlockSizeUpperBound () { * for PreferredBlockSize. * @throws IllegalArgumentException as the result */ - @Test(expected=IllegalArgumentException.class) - public void testPreferredBlockSizeBelowLowerBound () - throws IllegalArgumentException { - replication = 3; - preferredBlockSize = -1; - createINodeFile(replication, preferredBlockSize); - } + @Test + public void testPreferredBlockSizeBelowLowerBound() + throws IllegalArgumentException { + assertThrows(IllegalArgumentException.class, () -> { + replication = 3; + preferredBlockSize = -1; + createINodeFile(replication, preferredBlockSize); + }); + } /** * IllegalArgumentException is expected for setting above upper bound * for PreferredBlockSize. 
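The TestINodeFile rewrites above replace @Test(expected = IllegalArgumentException.class) with assertThrows, which scopes the expectation to the lambda instead of the whole method and returns the exception for further inspection. A minimal, self-contained sketch; createINodeFile here is a stand-in for the test's real helper:

import static org.junit.jupiter.api.Assertions.assertThrows;
import org.junit.jupiter.api.Test;

class ExpectedExceptionSketch {
  @Test
  void rejectsNegativeReplication() {
    IllegalArgumentException e = assertThrows(IllegalArgumentException.class, () -> {
      createINodeFile((short) -1, 128 * 1024 * 1024); // hypothetical helper, not the real one
    });
    // e.getMessage() can be asserted on here if the text matters
  }

  private static void createINodeFile(short replication, long preferredBlockSize) {
    if (replication < 0 || preferredBlockSize < 0) {
      throw new IllegalArgumentException("negative value");
    }
  }
}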
* @throws IllegalArgumentException as the result */ - @Test(expected=IllegalArgumentException.class) - public void testPreferredBlockSizeAboveUpperBound () - throws IllegalArgumentException { - replication = 3; - preferredBlockSize = BLKSIZE_MAXVALUE+1; - createINodeFile(replication, preferredBlockSize); - } + @Test + public void testPreferredBlockSizeAboveUpperBound() + throws IllegalArgumentException { + assertThrows(IllegalArgumentException.class, () -> { + replication = 3; + preferredBlockSize = BLKSIZE_MAXVALUE + 1; + createINodeFile(replication, preferredBlockSize); + }); + } @Test public void testGetFullPathName() { @@ -358,12 +367,12 @@ public void testGetFullPathNameAfterSetQuota() throws Exception { @Test public void testConcatBlocks() { INodeFile origFile = createINodeFiles(1, "origfile")[0]; - assertEquals("Number of blocks didn't match", origFile.numBlocks(), 1L); + assertEquals(origFile.numBlocks(), 1L, "Number of blocks didn't match"); INodeFile[] appendFiles = createINodeFiles(4, "appendfile"); BlockManager bm = Mockito.mock(BlockManager.class); origFile.concatBlocks(appendFiles, bm); - assertEquals("Number of blocks didn't match", origFile.numBlocks(), 5L); + assertEquals(origFile.numBlocks(), 5L, "Number of blocks didn't match"); } /** @@ -589,7 +598,8 @@ public void testInodeId() throws IOException { } } - @Test(timeout=120000) + @Test + @Timeout(value = 120) public void testWriteToDeletedFile() throws IOException { Configuration conf = new Configuration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1) @@ -690,8 +700,7 @@ public void testInodeIdBasedPaths() throws Exception { fs.setReplication(testFileInodePath, (short)1); // ClientProtocol#getPreferredBlockSize - assertEquals(testFileBlockSize, - nnRpc.getPreferredBlockSize(testFileInodePath.toString())); + assertEquals(testFileBlockSize, nnRpc.getPreferredBlockSize(testFileInodePath.toString())); /* * HDFS-6749 added missing calls to FSDirectory.resolvePath in the @@ -1261,7 +1270,7 @@ public void testConcat() throws IOException { ContentSummary cs = dfs.getContentSummary(new Path(dir)); QuotaUsage qu = dfs.getQuotaUsage(new Path(dir)); - Assert.assertEquals(cs.getFileCount() + cs.getDirectoryCount(), + assertEquals(cs.getFileCount() + cs.getDirectoryCount(), qu.getFileAndDirectoryCount()); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java index f8c532b152ed6..2dffeaee5bdb9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java @@ -32,9 +32,11 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.util.Time; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; /** * Ensure during large directory delete, namenode does not block until the @@ -76,13 +78,13 @@ private void createFiles() throws IOException { createFile(filename, 100); } } - + private int getBlockCount() { - Assert.assertNotNull("Null cluster", mc); - 
Assert.assertNotNull("No Namenode in cluster", mc.getNameNode()); + assertNotNull(mc, "Null cluster"); + assertNotNull(mc.getNameNode(), "No Namenode in cluster"); FSNamesystem namesystem = mc.getNamesystem(); - Assert.assertNotNull("Null Namesystem in cluster", namesystem); - Assert.assertNotNull("Null Namesystem.blockmanager", namesystem.getBlockManager()); + assertNotNull(namesystem, "Null Namesystem in cluster"); + assertNotNull(namesystem.getBlockManager(), "Null Namesystem.blockmanager"); return (int) namesystem.getBlocksTotal(); } @@ -149,7 +151,7 @@ protected void execute() throws Throwable { LOG.info("Deletion took " + (end - start) + "msecs"); LOG.info("createOperations " + createOps); LOG.info("lockOperations " + lockOps); - Assert.assertTrue(lockOps + createOps > 0); + assertTrue(lockOps + createOps > 0); threads[0].rethrow(); threads[1].rethrow(); } @@ -218,9 +220,9 @@ public void largeDelete() throws Throwable { mc = new MiniDFSCluster.Builder(CONF).build(); try { mc.waitActive(); - Assert.assertNotNull("No Namenode in cluster", mc.getNameNode()); + assertNotNull(mc.getNameNode(), "No Namenode in cluster"); createFiles(); - Assert.assertEquals(TOTAL_BLOCKS, getBlockCount()); + assertEquals(TOTAL_BLOCKS, getBlockCount()); runThreads(); } finally { mc.shutdown(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java index e73a2e0030f94..d45e0ed58f822 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java @@ -17,10 +17,9 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; @@ -39,9 +38,8 @@ import org.apache.hadoop.hdfs.util.RwLockMode; import org.apache.hadoop.util.Lists; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import java.io.IOException; import java.util.ArrayList; @@ -53,12 +51,11 @@ import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; -import static org.junit.Assert.assertThat; import static org.mockito.Mockito.*; +import static org.assertj.core.api.Assertions.assertThat; +@Timeout(300) public class TestLeaseManager { - @Rule - public Timeout timeout = new Timeout(300000); public static long maxLockHoldToReleaseLeaseMs = 100; @@ -122,26 +119,26 @@ public void testCountPath() { LeaseManager lm = new LeaseManager(makeMockFsNameSystem()); lm.addLease("holder1", 1); - assertThat(lm.countPath(), is(1L)); + assertThat(lm.countPath()).isEqualTo(1L); lm.addLease("holder2", 2); - assertThat(lm.countPath(), is(2L)); + assertThat(lm.countPath()).isEqualTo(2L); lm.addLease("holder2", 2); // Duplicate addition - assertThat(lm.countPath(), is(2L)); + assertThat(lm.countPath()).isEqualTo(2L); - assertThat(lm.countPath(), is(2L)); + 
assertThat(lm.countPath()).isEqualTo(2L); // Remove a couple of non-existing leases. countPath should not change. lm.removeLease("holder2", stubInodeFile(3)); lm.removeLease("InvalidLeaseHolder", stubInodeFile(1)); - assertThat(lm.countPath(), is(2L)); + assertThat(lm.countPath()).isEqualTo(2L); INodeFile file = stubInodeFile(1); lm.reassignLease(lm.getLease(file), file, "holder2"); - assertThat(lm.countPath(), is(2L)); // Count unchanged on reassign + assertThat(lm.countPath()).isEqualTo(2L); // Count unchanged on reassign lm.removeLease("holder2", stubInodeFile(2)); // Remove existing - assertThat(lm.countPath(), is(1L)); + assertThat(lm.countPath()).isEqualTo(1L); } /** @@ -176,8 +173,8 @@ public void testLeaseRestorationOnRestart() throws Exception { // Check whether the lease manager has the lease dir = cluster.getNamesystem().getFSDirectory(); file = dir.getINode(path).asFile(); - assertTrue("Lease should exist.", - cluster.getNamesystem().leaseManager.getLease(file) != null); + assertTrue(cluster.getNamesystem().leaseManager.getLease(file) != null, + "Lease should exist."); } finally { if (cluster != null) { cluster.shutdown(); @@ -191,7 +188,8 @@ public void testLeaseRestorationOnRestart() throws Exception { * {@link LeaseManager#getINodeIdWithLeases()} and * {@link LeaseManager#getINodeWithLeases(INodeDirectory)}. */ - @Test (timeout = 60000) + @Test + @Timeout(value = 60) public void testInodeWithLeases() throws Exception { FSNamesystem fsNamesystem = makeMockFsNameSystem(); when(fsNamesystem.getMaxListOpenFilesResponses()).thenReturn(1024); @@ -233,7 +231,8 @@ public void testInodeWithLeases() throws Exception { * {@link LeaseManager#getINodeIdWithLeases()} and * {@link LeaseManager#getINodeWithLeases(INodeDirectory)}. */ - @Test (timeout = 240000) + @Test + @Timeout(value = 240) public void testInodeWithLeasesAtScale() throws Exception { FSNamesystem fsNamesystem = makeMockFsNameSystem(); when(fsNamesystem.getMaxListOpenFilesResponses()).thenReturn(4096); @@ -312,7 +311,8 @@ private void testInodeWithLeasesAtScaleImpl(FSNamesystem fsNamesystem, * {@link LeaseManager#getINodeIdWithLeases()} and * {@link LeaseManager#getINodeWithLeases(INodeDirectory)}. 
*/ - @Test (timeout = 60000) + @Test + @Timeout(value = 60) public void testInodeWithLeasesForAncestorDir() throws Exception { FSNamesystem fsNamesystem = makeMockFsNameSystem(); FSDirectory fsDirectory = fsNamesystem.getFSDirectory(); @@ -350,8 +350,7 @@ public void testInodeWithLeasesForAncestorDir() throws Exception { } assertEquals(pathTree.length, lm.getINodeIdWithLeases().size()); assertEquals(pathTree.length, lm.getINodeWithLeases().size()); - assertEquals(pathTree.length, lm.getINodeWithLeases( - rootInodeDirectory).size()); + assertEquals(pathTree.length, lm.getINodeWithLeases(rootInodeDirectory).size()); // reset lm.removeAllLeases(); @@ -370,8 +369,7 @@ public void testInodeWithLeasesForAncestorDir() throws Exception { assertTrue(filesLeased.contains(leasedFileName)); } - assertEquals(filesLeased.size(), - lm.getINodeWithLeases(rootInodeDirectory).size()); + assertEquals(filesLeased.size(), lm.getINodeWithLeases(rootInodeDirectory).size()); assertEquals(filesLeased.size() - 2, lm.getINodeWithLeases(pathINodeMap.get("ENG").asDirectory()).size()); assertEquals(filesLeased.size() - 2, @@ -384,15 +382,13 @@ public void testInodeWithLeasesForAncestorDir() throws Exception { lm.getINodeWithLeases(pathINodeMap.get("n").asDirectory()).size()); lm.removeLease(pathINodeMap.get("n2.log").getId()); - assertEquals(filesLeased.size() - 1, - lm.getINodeWithLeases(rootInodeDirectory).size()); + assertEquals(filesLeased.size() - 1, lm.getINodeWithLeases(rootInodeDirectory).size()); assertEquals(filesLeased.size() - 4, lm.getINodeWithLeases(pathINodeMap.get("n").asDirectory()).size()); lm.removeAllLeases(); filesLeased.clear(); - assertEquals(filesLeased.size(), - lm.getINodeWithLeases(rootInodeDirectory).size()); + assertEquals(filesLeased.size(), lm.getINodeWithLeases(rootInodeDirectory).size()); } @@ -400,19 +396,15 @@ private void verifyINodeLeaseCounts(FSNamesystem fsNamesystem, LeaseManager leaseManager, INodeDirectory ancestorDirectory, int iNodeIdWithLeaseCount, int iNodeWithLeaseCount, int iNodeUnderAncestorLeaseCount) throws IOException { - assertEquals(iNodeIdWithLeaseCount, - leaseManager.getINodeIdWithLeases().size()); - assertEquals(iNodeWithLeaseCount, - leaseManager.getINodeWithLeases().size()); + assertEquals(iNodeIdWithLeaseCount, leaseManager.getINodeIdWithLeases().size()); + assertEquals(iNodeWithLeaseCount, leaseManager.getINodeWithLeases().size()); assertEquals(iNodeUnderAncestorLeaseCount, leaseManager.getINodeWithLeases(ancestorDirectory).size()); - assertEquals(iNodeIdWithLeaseCount, - leaseManager.getUnderConstructionFiles(0).size()); + assertEquals(iNodeIdWithLeaseCount, leaseManager.getUnderConstructionFiles(0).size()); assertEquals(0, - (fsNamesystem.getFilesBlockingDecom(0, - OpenFilesIterator.FILTER_PATH_DEFAULT) == null ? 0 - : fsNamesystem.getFilesBlockingDecom(0, - OpenFilesIterator.FILTER_PATH_DEFAULT).size())); + (fsNamesystem.getFilesBlockingDecom(0, OpenFilesIterator.FILTER_PATH_DEFAULT) == null ? 
0 + : fsNamesystem.getFilesBlockingDecom(0, OpenFilesIterator.FILTER_PATH_DEFAULT) + .size())); } private Map createINodeTree(INodeDirectory parentDir, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java index a197c00bceea0..81cb1d9a39e65 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.File; import java.io.IOException; @@ -46,7 +46,8 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.util.StringUtils; -import org.junit.Test; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import org.slf4j.Logger; /** @@ -61,7 +62,8 @@ public class TestListCorruptFileBlocks { static final Logger LOG = NameNode.stateChangeLog; /** check if nn.getCorruptFiles() returns a file that has corrupted blocks */ - @Test (timeout=300000) + @Test + @Timeout(value = 300) public void testListCorruptFilesCorruptedBlock() throws Exception { MiniDFSCluster cluster = null; @@ -93,18 +95,18 @@ public void testListCorruptFilesCorruptedBlock() throws Exception { final NameNode namenode = cluster.getNameNode(); Collection badFiles = namenode. getNamesystem().listCorruptFileBlocks("/", null); - assertEquals("Namenode has " + badFiles.size() - + " corrupt files. Expecting None.", 0, badFiles.size()); + assertEquals(0, badFiles.size(), + "Namenode has " + badFiles.size() + " corrupt files. Expecting None."); assertCorruptFilesCount(cluster, badFiles.size()); // Now deliberately corrupt one block String bpid = cluster.getNamesystem().getBlockPoolId(); File storageDir = cluster.getInstanceStorageDir(0, 1); File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid); - assertTrue("data directory does not exist", data_dir.exists()); + assertTrue(data_dir.exists(), "data directory does not exist"); List metaFiles = MiniDFSCluster.getAllBlockFiles(data_dir); - assertTrue("Data directory does not contain any blocks or there was an " - + "IO error", metaFiles != null && !metaFiles.isEmpty()); + assertTrue(metaFiles != null && !metaFiles.isEmpty(), + "Data directory does not contain any blocks or there was an " + "IO error"); File metaFile = metaFiles.get(0); RandomAccessFile file = new RandomAccessFile(metaFile, "rw"); FileChannel channel = file.getChannel(); @@ -122,15 +124,16 @@ public void testListCorruptFilesCorruptedBlock() throws Exception { } catch (BlockMissingException e) { System.out.println("Received BlockMissingException as expected."); } catch (IOException e) { - assertTrue("Corrupted replicas not handled properly. Expecting BlockMissingException " + - " but received IOException " + e, false); + assertTrue(false, + "Corrupted replicas not handled properly. Expecting BlockMissingException " + + " but received IOException " + e); } // fetch bad file list from namenode. There should be one file. 
badFiles = namenode.getNamesystem().listCorruptFileBlocks("/", null); LOG.info("Namenode has bad files. " + badFiles.size()); - assertEquals("Namenode has " + badFiles.size() + " bad files. " + - "Expecting 1.", 1, badFiles.size()); + assertEquals(1, badFiles.size(), + "Namenode has " + badFiles.size() + " bad files. " + "Expecting 1."); assertCorruptFilesCount(cluster, badFiles.size()); util.cleanup(fs, "/srcdat10"); } finally { @@ -141,7 +144,8 @@ public void testListCorruptFilesCorruptedBlock() throws Exception { /** * Check that listCorruptFileBlocks works while the namenode is still in safemode. */ - @Test (timeout=300000) + @Test + @Timeout(value = 300) public void testListCorruptFileBlocksInSafeMode() throws Exception { MiniDFSCluster cluster = null; @@ -176,18 +180,18 @@ public void testListCorruptFileBlocksInSafeMode() throws Exception { // fetch bad file list from namenode. There should be none. Collection badFiles = cluster.getNameNode().getNamesystem().listCorruptFileBlocks("/", null); - assertEquals("Namenode has " + badFiles.size() - + " corrupt files. Expecting None.", 0, badFiles.size()); + assertEquals(0, badFiles.size(), + "Namenode has " + badFiles.size() + " corrupt files. Expecting None."); assertCorruptFilesCount(cluster, badFiles.size()); // Now deliberately corrupt one block File storageDir = cluster.getInstanceStorageDir(0, 0); File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, cluster.getNamesystem().getBlockPoolId()); - assertTrue("data directory does not exist", data_dir.exists()); + assertTrue(data_dir.exists(), "data directory does not exist"); List metaFiles = MiniDFSCluster.getAllBlockFiles(data_dir); - assertTrue("Data directory does not contain any blocks or there was an " - + "IO error", metaFiles != null && !metaFiles.isEmpty()); + assertTrue(metaFiles != null && !metaFiles.isEmpty(), + "Data directory does not contain any blocks or there was an " + "IO error"); File metaFile = metaFiles.get(0); RandomAccessFile file = new RandomAccessFile(metaFile, "rw"); FileChannel channel = file.getChannel(); @@ -205,17 +209,17 @@ public void testListCorruptFileBlocksInSafeMode() throws Exception { } catch (BlockMissingException e) { System.out.println("Received BlockMissingException as expected."); } catch (IOException e) { - assertTrue("Corrupted replicas not handled properly. " + - "Expecting BlockMissingException " + - " but received IOException " + e, false); + assertTrue(false, "Corrupted replicas not handled properly. " + + "Expecting BlockMissingException " + + " but received IOException " + e); } // fetch bad file list from namenode. There should be one file. badFiles = cluster.getNameNode().getNamesystem(). listCorruptFileBlocks("/", null); LOG.info("Namenode has bad files. " + badFiles.size()); - assertEquals("Namenode has " + badFiles.size() + " bad files. " + - "Expecting 1.", 1, badFiles.size()); + assertEquals(1, badFiles.size(), + "Namenode has " + badFiles.size() + " bad files. " + "Expecting 1."); assertCorruptFilesCount(cluster, badFiles.size()); // restart namenode @@ -238,22 +242,21 @@ public void testListCorruptFileBlocksInSafeMode() throws Exception { } catch (BlockMissingException e) { System.out.println("Received BlockMissingException as expected."); } catch (IOException e) { - assertTrue("Corrupted replicas not handled properly. " + - "Expecting BlockMissingException " + - " but received IOException " + e, false); + assertTrue(false, "Corrupted replicas not handled properly. 
" + + "Expecting BlockMissingException " + + " but received IOException " + e); } // fetch bad file list from namenode. There should be one file. badFiles = cluster.getNameNode().getNamesystem(). listCorruptFileBlocks("/", null); LOG.info("Namenode has bad files. " + badFiles.size()); - assertEquals("Namenode has " + badFiles.size() + " bad files. " + - "Expecting 1.", 1, badFiles.size()); + assertEquals(1, badFiles.size(), + "Namenode has " + badFiles.size() + " bad files. " + "Expecting 1."); assertCorruptFilesCount(cluster, badFiles.size()); // check that we are still in safe mode - assertTrue("Namenode is not in safe mode", - cluster.getNameNode().isInSafeMode()); + assertTrue(cluster.getNameNode().isInSafeMode(), "Namenode is not in safe mode"); // now leave safe mode so that we can clean up cluster.getNameNodeRpc().setSafeMode( @@ -271,7 +274,8 @@ public void testListCorruptFileBlocksInSafeMode() throws Exception { } // deliberately remove blocks from a file and validate the list-corrupt-file-blocks API - @Test (timeout=300000) + @Test + @Timeout(value = 300) public void testlistCorruptFileBlocks() throws Exception { Configuration conf = new Configuration(); conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000); @@ -310,9 +314,9 @@ public void testlistCorruptFileBlocks() throws Exception { for (File metadataFile : metadataFiles) { File blockFile = Block.metaToBlockFile(metadataFile); LOG.info("Deliberately removing file " + blockFile.getName()); - assertTrue("Cannot remove file.", blockFile.delete()); + assertTrue(blockFile.delete(), "Cannot remove file."); LOG.info("Deliberately removing file " + metadataFile.getName()); - assertTrue("Cannot remove file.", metadataFile.delete()); + assertTrue(metadataFile.delete(), "Cannot remove file."); // break; } } @@ -384,7 +388,8 @@ private int countPaths(RemoteIterator iter) throws IOException { /** * test listCorruptFileBlocks in DistributedFileSystem */ - @Test (timeout=300000) + @Test + @Timeout(value = 300) public void testlistCorruptFileBlocksDFS() throws Exception { Configuration conf = new Configuration(); conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000); @@ -423,9 +428,9 @@ public void testlistCorruptFileBlocksDFS() throws Exception { for (File metadataFile : metadataFiles) { File blockFile = Block.metaToBlockFile(metadataFile); LOG.info("Deliberately removing file " + blockFile.getName()); - assertTrue("Cannot remove file.", blockFile.delete()); + assertTrue(blockFile.delete(), "Cannot remove file."); LOG.info("Deliberately removing file " + metadataFile.getName()); - assertTrue("Cannot remove file.", metadataFile.delete()); + assertTrue(metadataFile.delete(), "Cannot remove file."); // break; } } @@ -461,7 +466,8 @@ public void testlistCorruptFileBlocksDFS() throws Exception { * Also, test that DFS.listCorruptFileBlocks can make multiple successive * calls. */ - @Test (timeout=300000) + @Test + @Timeout(value = 300) public void testMaxCorruptFiles() throws Exception { MiniDFSCluster cluster = null; try { @@ -483,9 +489,8 @@ public void testMaxCorruptFiles() throws Exception { final NameNode namenode = cluster.getNameNode(); Collection badFiles = namenode. getNamesystem().listCorruptFileBlocks("/srcdat2", null); - assertEquals( - "Namenode has " + badFiles.size() + " corrupt files. Expecting none.", - 0, badFiles.size()); + assertEquals(0, badFiles.size(), + "Namenode has " + badFiles.size() + " corrupt files. 
Expecting none."); assertCorruptFilesCount(cluster, badFiles.size()); // Now deliberately blocks from all files @@ -501,8 +506,8 @@ public void testMaxCorruptFiles() throws Exception { continue; for (File metadataFile : metadataFiles) { File blockFile = Block.metaToBlockFile(metadataFile); - assertTrue("Cannot remove file.", blockFile.delete()); - assertTrue("Cannot remove file.", metadataFile.delete()); + assertTrue(blockFile.delete(), "Cannot remove file."); + assertTrue(metadataFile.delete(), "Cannot remove file."); } } } @@ -530,19 +535,16 @@ public void testMaxCorruptFiles() throws Exception { badFiles = namenode.getNamesystem(). listCorruptFileBlocks("/srcdat2", null); LOG.info("Namenode has bad files. " + badFiles.size()); - assertEquals("Namenode has " + badFiles.size() + " bad files. " + - "Expecting " + maxCorruptFileBlocks + ".", maxCorruptFileBlocks, - badFiles.size()); + assertEquals(maxCorruptFileBlocks, badFiles.size(), "Namenode has " + badFiles.size() + + " bad files. " + "Expecting " + maxCorruptFileBlocks + "."); CorruptFileBlockIterator iter = (CorruptFileBlockIterator) fs.listCorruptFileBlocks(new Path("/srcdat2")); int corruptPaths = countPaths(iter); - assertTrue("Expected more than " + maxCorruptFileBlocks + - " corrupt file blocks but got " + corruptPaths, - corruptPaths > maxCorruptFileBlocks); - assertTrue("Iterator should have made more than 1 call but made " + - iter.getCallsMade(), - iter.getCallsMade() > 1); + assertTrue(corruptPaths > maxCorruptFileBlocks, "Expected more than " + maxCorruptFileBlocks + + " corrupt file blocks but got " + corruptPaths); + assertTrue(iter.getCallsMade() > 1, + "Iterator should have made more than 1 call but made " + iter.getCallsMade()); util.cleanup(fs, "/srcdat2"); } finally { @@ -550,7 +552,8 @@ public void testMaxCorruptFiles() throws Exception { } } - @Test(timeout = 60000) + @Test + @Timeout(value = 60) public void testListCorruptFileBlocksOnRelativePath() throws Exception { Configuration conf = new Configuration(); conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000); @@ -591,9 +594,9 @@ public void testListCorruptFileBlocksOnRelativePath() throws Exception { for (File metadataFile : metadataFiles) { File blockFile = Block.metaToBlockFile(metadataFile); LOG.info("Deliberately removing file " + blockFile.getName()); - assertTrue("Cannot remove file.", blockFile.delete()); + assertTrue(blockFile.delete(), "Cannot remove file."); LOG.info("Deliberately removing file " + metadataFile.getName()); - assertTrue("Cannot remove file.", metadataFile.delete()); + assertTrue(metadataFile.delete(), "Cannot remove file."); } } @@ -610,7 +613,7 @@ public void testListCorruptFileBlocksOnRelativePath() throws Exception { } // Validate we get all the corrupt files LOG.info("Namenode has bad files. 
" + numCorrupt); - assertEquals("Failed to get corrupt files!", 3, numCorrupt); + assertEquals(3, numCorrupt, "Failed to get corrupt files!"); util.cleanup(fs, "corruptData"); } finally { @@ -628,7 +631,7 @@ public void testListCorruptFileBlocksOnRelativePath() throws Exception { private void assertCorruptFilesCount(MiniDFSCluster cluster, int expectedCorrupt) { FSNamesystem fs = cluster.getNameNode().getNamesystem(); - assertEquals("Incorrect number of corrupt files returned", expectedCorrupt, - fs.getCorruptFilesCount()); + assertEquals(expectedCorrupt, fs.getCorruptFilesCount(), + "Incorrect number of corrupt files returned"); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java index 7c448f19a3bb8..6c6c2b0008d52 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java @@ -18,9 +18,10 @@ package org.apache.hadoop.hdfs.server.namenode; import static org.apache.hadoop.test.LambdaTestUtils.intercept; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import java.io.IOException; import java.util.EnumSet; @@ -56,10 +57,10 @@ import org.apache.hadoop.hdfs.tools.DFSAdmin; import org.apache.hadoop.util.ToolRunner; import org.apache.hadoop.util.ChunkedArrayList; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.junit.Assert; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; /** * Verify open files listing. 
@@ -73,7 +74,7 @@ public class TestListOpenFiles { private static final Logger LOG = LoggerFactory.getLogger(TestListOpenFiles.class); - @Before + @BeforeEach public void setUp() throws IOException { Configuration conf = new HdfsConfiguration(); conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L); @@ -86,7 +87,7 @@ public void setUp() throws IOException { nnRpc = cluster.getNameNodeRpc(); } - @After + @AfterEach public void tearDown() throws IOException { if (fs != null) { fs.close(); @@ -96,7 +97,8 @@ public void tearDown() throws IOException { } } - @Test(timeout = 120000L) + @Test + @Timeout(120) public void testListOpenFilesViaNameNodeRPC() throws Exception { HashMap openFiles = new HashMap<>(); createFiles(fs, "closed", 10); @@ -105,13 +107,12 @@ public void testListOpenFilesViaNameNodeRPC() throws Exception { BatchedEntries openFileEntryBatchedEntries = nnRpc.listOpenFiles(0, EnumSet.of(OpenFilesType.ALL_OPEN_FILES), OpenFilesIterator.FILTER_PATH_DEFAULT); - assertTrue("Open files list should be empty!", - openFileEntryBatchedEntries.size() == 0); + assertTrue(openFileEntryBatchedEntries.size() == 0, "Open files list should be empty!"); BatchedEntries openFilesBlockingDecomEntries = nnRpc.listOpenFiles(0, EnumSet.of(OpenFilesType.BLOCKING_DECOMMISSION), OpenFilesIterator.FILTER_PATH_DEFAULT); - assertTrue("Open files list blocking decommission should be empty!", - openFilesBlockingDecomEntries.size() == 0); + assertTrue(openFilesBlockingDecomEntries.size() == 0, + "Open files list blocking decommission should be empty!"); openFiles.putAll( DFSTestUtil.createOpenFiles(fs, "open-1", 1)); @@ -147,18 +148,15 @@ private void verifyOpenFiles(Map openFiles, batchedEntries = nnRpc.listOpenFiles(lastEntry.getId(), openFilesTypes, path); } - assertTrue("Incorrect open files list size!", - batchedEntries.size() <= BATCH_SIZE); + assertTrue(batchedEntries.size() <= BATCH_SIZE, "Incorrect open files list size!"); for (int i = 0; i < batchedEntries.size(); i++) { lastEntry = batchedEntries.get(i); String filePath = lastEntry.getFilePath(); LOG.info("OpenFile: " + filePath); - assertTrue("Unexpected open file: " + filePath, - remainingFiles.remove(new Path(filePath))); + assertTrue(remainingFiles.remove(new Path(filePath)), "Unexpected open file: " + filePath); } } while (batchedEntries.hasMore()); - assertTrue(remainingFiles.size() + " open files not listed!", - remainingFiles.size() == 0); + assertTrue(remainingFiles.size() == 0, remainingFiles.size() + " open files not listed!"); } /** @@ -192,7 +190,8 @@ private Set createFiles(FileSystem fileSystem, String fileNamePrefix, /** * Verify dfsadmin -listOpenFiles command in HA mode. */ - @Test(timeout = 120000) + @Test + @Timeout(value = 120) public void testListOpenFilesInHA() throws Exception { fs.close(); cluster.shutdown(); @@ -228,10 +227,9 @@ public void testListOpenFilesInHA() throws Exception { public void run() { while(!failoverCompleted.get()) { try { + assertEquals(0, ToolRunner.run(dfsAdmin, new String[]{"-listOpenFiles"})); assertEquals(0, ToolRunner.run(dfsAdmin, - new String[] {"-listOpenFiles"})); - assertEquals(0, ToolRunner.run(dfsAdmin, - new String[] {"-listOpenFiles", "-blockingDecommission"})); + new String[]{"-listOpenFiles", "-blockingDecommission"})); // Sleep for some time to avoid // flooding logs with listing. 
Thread.sleep(listingIntervalMsec); @@ -255,11 +253,10 @@ public void run() { haCluster.transitionToActive(1); failoverCompleted.set(true); - assertEquals(0, ToolRunner.run(dfsAdmin, - new String[] {"-listOpenFiles"})); - assertEquals(0, ToolRunner.run(dfsAdmin, - new String[] {"-listOpenFiles", "-blockingDecommission"})); - assertFalse("Client Error!", listOpenFilesError.get()); + assertEquals(0, ToolRunner.run(dfsAdmin, new String[]{"-listOpenFiles"})); + assertEquals(0, + ToolRunner.run(dfsAdmin, new String[]{"-listOpenFiles", "-blockingDecommission"})); + assertFalse(listOpenFilesError.get(), "Client Error!"); clientThread.join(); } finally { @@ -269,7 +266,8 @@ public void run() { } } - @Test(timeout = 120000) + @Test + @Timeout(value = 120) public void testListOpenFilesWithFilterPath() throws IOException { HashMap openFiles = new HashMap<>(); createFiles(fs, "closed", 10); @@ -278,13 +276,12 @@ public void testListOpenFilesWithFilterPath() throws IOException { BatchedEntries openFileEntryBatchedEntries = nnRpc .listOpenFiles(0, EnumSet.of(OpenFilesType.ALL_OPEN_FILES), OpenFilesIterator.FILTER_PATH_DEFAULT); - assertTrue("Open files list should be empty!", - openFileEntryBatchedEntries.size() == 0); + assertTrue(openFileEntryBatchedEntries.size() == 0, "Open files list should be empty!"); BatchedEntries openFilesBlockingDecomEntries = nnRpc .listOpenFiles(0, EnumSet.of(OpenFilesType.BLOCKING_DECOMMISSION), OpenFilesIterator.FILTER_PATH_DEFAULT); - assertTrue("Open files list blocking decommission should be empty!", - openFilesBlockingDecomEntries.size() == 0); + assertTrue(openFilesBlockingDecomEntries.size() == 0, + "Open files list blocking decommission should be empty!"); openFiles.putAll( DFSTestUtil.createOpenFiles(fs, new Path("/base"), "open-1", 1)); @@ -348,7 +345,7 @@ public void testListOpenFilesWithDeletedPath() throws Exception { assertEquals(0, openFileEntryBatchedEntries.size()); fsNamesystem.leaseManager.removeLease(dir.getINode(path).getId()); } catch (NullPointerException e) { - Assert.fail("Should not throw NPE when the file is deleted but has lease!"); + fail("Should not throw NPE when the file is deleted but has lease!"); } finally { fsNamesystem.writeUnlock(RwLockMode.FS, "testListOpenFilesWithDeletedPath"); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetadataVersionOutput.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetadataVersionOutput.java index e1663e29615b5..de0192f0bde59 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetadataVersionOutput.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetadataVersionOutput.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.namenode; import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertTrue; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.MiniDFSCluster; @@ -26,8 +26,9 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.util.ExitUtil; -import org.junit.After; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import java.io.ByteArrayOutputStream; import java.io.IOException; @@ -43,7 +44,7 @@ public class 
TestMetadataVersionOutput { private MiniDFSCluster dfsCluster = null; private final Configuration conf = new Configuration(); - @After + @AfterEach public void tearDown() throws Exception { if (dfsCluster != null) { dfsCluster.shutdown(); @@ -60,7 +61,8 @@ private void initConfig() { conf.unset(DFS_NAMENODE_NAME_DIR_KEY); } - @Test(timeout = 30000) + @Test + @Timeout(value = 30) public void testMetadataVersionOutput() throws IOException { initConfig(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameCache.java index e32e77e351930..c27ea3dc03ffe 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameCache.java @@ -17,12 +17,12 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; -import org.junit.Test; +import org.junit.jupiter.api.Test; /** * Test for {@link NameCache} class diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeAcl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeAcl.java index 6a36c9879b117..fcf40c0a8adc6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeAcl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeAcl.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.namenode; import org.apache.hadoop.conf.Configuration; -import org.junit.BeforeClass; +import org.junit.jupiter.api.BeforeAll; /** * Tests NameNode interaction for all ACL modification APIs. 
This test suite @@ -26,7 +26,7 @@ */ public class TestNameNodeAcl extends FSAclBaseTest { - @BeforeClass + @BeforeAll public static void init() throws Exception { conf = new Configuration(); startCluster(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetadataConsistency.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetadataConsistency.java index b0ebb4b72f5d1..4d234bf959665 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetadataConsistency.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetadataConsistency.java @@ -33,15 +33,15 @@ import java.util.function.Supplier; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import java.io.IOException; import java.io.OutputStream; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; public class TestNameNodeMetadataConsistency { private static final Path filePath1 = new Path("/testdata1.txt"); @@ -53,7 +53,7 @@ public class TestNameNodeMetadataConsistency { MiniDFSCluster cluster; HdfsConfiguration conf; - @Before + @BeforeEach public void InitTest() throws IOException { conf = new HdfsConfiguration(); conf.setLong(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, @@ -63,7 +63,7 @@ public void InitTest() throws IOException { .build(); } - @After + @AfterEach public void cleanup() { if (cluster != null) { cluster.shutdown(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeXAttr.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeXAttr.java index 1722e12089a9c..2302e5c519733 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeXAttr.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeXAttr.java @@ -21,8 +21,11 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSTestUtil; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; /** * Tests NameNode interaction for all XAttr APIs. 
@@ -36,7 +39,8 @@ public class TestNameNodeXAttr extends FSXAttrBaseTest { private static final Path link = new Path(linkParent, "link"); private static final Path target = new Path(targetParent, "target"); - @Test(timeout = 120000) + @Test + @Timeout(value = 120) public void testXAttrSymlinks() throws Exception { fs.mkdirs(linkParent); fs.mkdirs(targetParent); @@ -47,27 +51,27 @@ public void testXAttrSymlinks() throws Exception { fs.setXAttr(target, name2, value2); Map xattrs = fs.getXAttrs(link); - Assert.assertEquals(xattrs.size(), 2); - Assert.assertArrayEquals(value1, xattrs.get(name1)); - Assert.assertArrayEquals(value2, xattrs.get(name2)); + assertEquals(xattrs.size(), 2); + assertArrayEquals(value1, xattrs.get(name1)); + assertArrayEquals(value2, xattrs.get(name2)); fs.setXAttr(link, name3, null); xattrs = fs.getXAttrs(target); - Assert.assertEquals(xattrs.size(), 3); - Assert.assertArrayEquals(value1, xattrs.get(name1)); - Assert.assertArrayEquals(value2, xattrs.get(name2)); - Assert.assertArrayEquals(new byte[0], xattrs.get(name3)); + assertEquals(xattrs.size(), 3); + assertArrayEquals(value1, xattrs.get(name1)); + assertArrayEquals(value2, xattrs.get(name2)); + assertArrayEquals(new byte[0], xattrs.get(name3)); fs.removeXAttr(link, name1); xattrs = fs.getXAttrs(target); - Assert.assertEquals(xattrs.size(), 2); - Assert.assertArrayEquals(value2, xattrs.get(name2)); - Assert.assertArrayEquals(new byte[0], xattrs.get(name3)); + assertEquals(xattrs.size(), 2); + assertArrayEquals(value2, xattrs.get(name2)); + assertArrayEquals(new byte[0], xattrs.get(name3)); fs.removeXAttr(target, name3); xattrs = fs.getXAttrs(link); - Assert.assertEquals(xattrs.size(), 1); - Assert.assertArrayEquals(value2, xattrs.get(name2)); + assertEquals(xattrs.size(), 1); + assertArrayEquals(value2, xattrs.get(name2)); fs.delete(linkParent, true); fs.delete(targetParent, true); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSAcl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSAcl.java index a285cd303c9ca..3fa5ccc0361dd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSAcl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSAcl.java @@ -19,16 +19,16 @@ import org.apache.hadoop.hdfs.server.namenode.FSAclBaseTest; import org.apache.hadoop.security.UserGroupInformation; -import org.junit.BeforeClass; -import org.junit.Ignore; -import org.junit.Test; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; /** * Tests ACL APIs via WebHDFS. */ public class TestWebHDFSAcl extends FSAclBaseTest { - @BeforeClass + @BeforeAll public static void init() throws Exception { conf = WebHdfsTestUtil.createConf(); startCluster(); @@ -40,7 +40,7 @@ public static void init() throws Exception { */ @Override @Test - @Ignore + @Disabled public void testDefaultAclNewSymlinkIntermediate() { }