@@ -50,8 +50,9 @@
 import org.apache.hadoop.metrics2.MetricsSystem;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
@@ -77,8 +78,8 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LAZY_PERSIST_FILE_SCRUB_INTERVAL_SEC;
 import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Some long running Balancer tasks.
@@ -101,7 +102,7 @@ public class TestBalancerLongRunningTasks {
   private final static Path FILE_PATH = new Path(FILE_NAME);
   private MiniDFSCluster cluster;
 
-  @After
+  @AfterEach
   public void shutdown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -159,7 +160,8 @@ static void initConfWithRamDisk(Configuration conf,
    * Replica in (DN0,SSD) should not be moved to (DN1,SSD).
    * Otherwise DN1 has 2 replicas.
    */
-  @Test(timeout = 100000)
+  @Test
+  @Timeout(value = 100)
   public void testTwoReplicaShouldNotInSameDN() throws Exception {
     final Configuration conf = new HdfsConfiguration();
 
@@ -218,7 +220,8 @@ public void testTwoReplicaShouldNotInSameDN() throws Exception {
    * One DN has two files on RAM_DISK, other DN has no files on RAM_DISK.
    * Then verify that the balancer does not migrate files on RAM_DISK across DN.
    */
-  @Test(timeout = 300000)
+  @Test
+  @Timeout(value = 300)
   public void testBalancerWithRamDisk() throws Exception {
     final int seed = 0xFADED;
     final short replicationFactor = 1;
@@ -285,7 +288,8 @@ public void testBalancerWithRamDisk() throws Exception {
   /**
    * Balancer should not move blocks with size < minBlockSize.
    */
-  @Test(timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testMinBlockSizeAndSourceNodes() throws Exception {
     final Configuration conf = new HdfsConfiguration();
     initConf(conf);
@@ -405,7 +409,8 @@ public void testMinBlockSizeAndSourceNodes() throws Exception {
    *
    * @throws Exception
    */
-  @Test(timeout = 100000)
+  @Test
+  @Timeout(value = 100)
   public void testUpgradeDomainPolicyAfterBalance() throws Exception {
     final Configuration conf = new HdfsConfiguration();
     initConf(conf);
@@ -425,7 +430,8 @@ public void testUpgradeDomainPolicyAfterBalance() throws Exception {
    *
    * @throws Exception
    */
-  @Test(timeout = 100000)
+  @Test
+  @Timeout(value = 100)
   public void testRackPolicyAfterBalance() throws Exception {
     final Configuration conf = new HdfsConfiguration();
     initConf(conf);
@@ -505,7 +511,8 @@ private void runBalancerAndVerifyBlockPlacmentPolicy(Configuration conf,
    *
    * @throws Exception
    */
-  @Test(timeout = 100000)
+  @Test
+  @Timeout(value = 100)
   public void testBalancerWithPinnedBlocks() throws Exception {
     // This test assumes stick-bit based block pin mechanism available only
     // in Linux/Unix. It can be unblocked on Windows when HDFS-7759 is ready to
@@ -559,7 +566,8 @@ public void testBalancerWithPinnedBlocks() throws Exception {
     assertEquals(ExitStatus.NO_MOVE_PROGRESS.getExitCode(), r);
   }
 
-  @Test(timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testBalancerWithSortTopNodes() throws Exception {
     final Configuration conf = new HdfsConfiguration();
     initConf(conf);
@@ -666,16 +674,17 @@ public void testBalancerWithSortTopNodes() throws Exception {
     // Hence, overall total blocks moved by HDFS balancer would be either of these 2 options:
     // a) 2 blocks of total size (100B + 100B)
     // b) 3 blocks of total size (50B + 100B + 100B)
-    assertTrue("BalancerResult is not as expected. " + balancerResult,
-        (balancerResult.getBytesAlreadyMoved() == 200
+    assertTrue((balancerResult.getBytesAlreadyMoved() == 200
             && balancerResult.getBlocksMoved() == 2)
             || (balancerResult.getBytesAlreadyMoved() == 250
-            && balancerResult.getBlocksMoved() == 3));
+            && balancerResult.getBlocksMoved() == 3),
+        "BalancerResult is not as expected. " + balancerResult);
     // 100% and 95% used nodes will be balanced, so top used will be 900
     assertEquals(900, maxUsage);
   }
 
-  @Test(timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testBalancerWithLimitOverUtilizedNum() throws Exception {
     final Configuration conf = new HdfsConfiguration();
     // Init the config (block size to 100)
@@ -762,12 +771,14 @@ public void testBalancerWithLimitOverUtilizedNum() throws Exception {
       }
       // The maxUsage value is 950, only 100% of the nodes will be balanced
       assertEquals(950, maxUsage);
-      assertTrue("BalancerResult is not as expected. " + balancerResult,
-          (balancerResult.getBytesAlreadyMoved() == 100 && balancerResult.getBlocksMoved() == 1));
+      assertTrue(
+          (balancerResult.getBytesAlreadyMoved() == 100 && balancerResult.getBlocksMoved() == 1),
+          "BalancerResult is not as expected. " + balancerResult);
     }
   }
 
-  @Test(timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testBalancerMetricsDuplicate() throws Exception {
     final Configuration conf = new HdfsConfiguration();
     // Init the config (block size to 100)
@@ -824,7 +835,8 @@ public void testBalancerMetricsDuplicate() throws Exception {
     }
   }
 
-  @Test(timeout = 100000)
+  @Test
+  @Timeout(value = 100)
   public void testMaxIterationTime() throws Exception {
     final Configuration conf = new HdfsConfiguration();
     initConf(conf);
@@ -881,8 +893,8 @@ public void testMaxIterationTime() throws Exception {
         // (highly unlikely) and then a block is moved unexpectedly,
         // IN_PROGRESS will be reported. This is highly unlikely unexpected
         // case. See HDFS-15989.
-        assertEquals("We expect ExitStatus.NO_MOVE_PROGRESS to be reported.",
-            ExitStatus.NO_MOVE_PROGRESS, r.getExitStatus());
+        assertEquals(ExitStatus.NO_MOVE_PROGRESS, r.getExitStatus(),
+            "We expect ExitStatus.NO_MOVE_PROGRESS to be reported.");
         assertEquals(0, r.getBlocksMoved());
       }
     } finally {
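
The conversion applied throughout this commit follows two mechanical patterns: JUnit 4's @Test(timeout = ...) takes milliseconds, while JUnit 5's @Timeout annotation defaults to TimeUnit.SECONDS, so @Test(timeout = 100000) becomes @Test plus @Timeout(value = 100); and the optional failure message moves from the first argument of org.junit.Assert methods to the last argument of org.junit.jupiter.api.Assertions. A minimal, self-contained sketch of the target style (hypothetical test class, not part of this file):

import static org.junit.jupiter.api.Assertions.assertEquals;

import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;

// Hypothetical example illustrating the JUnit 5 idioms used in this commit.
class ExampleJUnit5StyleTest {

  @Test
  @Timeout(value = 100)  // 100 seconds; @Timeout defaults to TimeUnit.SECONDS
  void runsWithinTimeout() {
    int expected = 2;
    int actual = 1 + 1;
    // In JUnit 5 the failure message is the last argument, not the first.
    assertEquals(expected, actual, "1 + 1 should equal 2");
  }
}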