@@ -188,7 +188,7 @@ private void verifyStats(NameNode namenode, FSNamesystem fsn,
   /**
    * Tests decommission for non federated cluster
    */
-  @Test(timeout = 360000)
+  @Test
   public void testDecommission() throws IOException {
     testDecommission(1, 6);
   }
@@ -198,7 +198,7 @@ public void testDecommission() throws IOException {
    * to other datanodes and satisfy the replication factor. Make sure the
    * datanode won't get stuck in decommissioning state.
    */
-  @Test(timeout = 360000)
+  @Test
   public void testDecommission2() throws IOException {
     LOG.info("Starting test testDecommission");
     int numNamenodes = 1;
@@ -247,7 +247,7 @@ public void testDecommission2() throws IOException {
   /**
    * Test decommission for federeated cluster
    */
-  @Test(timeout = 360000)
+  @Test
   public void testDecommissionFederation() throws IOException {
     testDecommission(2, 2);
   }
@@ -262,7 +262,7 @@ public void testDecommissionFederation() throws IOException {
    * That creates inconsistent state and prevent SBN from finishing
    * decommission.
    */
-  @Test(timeout = 360000)
+  @Test
   public void testDecommissionOnStandby() throws Exception {
     getConf().setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
     getConf().setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
@@ -435,7 +435,7 @@ private void testDecommission(int numNamenodes, int numDatanodes)
   /**
    * Test that over-replicated blocks are deleted on recommission.
    */
-  @Test(timeout = 120000)
+  @Test
   public void testRecommission() throws Exception {
     final int numDatanodes = 6;
     try {
@@ -516,7 +516,7 @@ public Boolean get() {
    * Tests cluster storage statistics during decommissioning for non
    * federated cluster
    */
-  @Test(timeout = 360000)
+  @Test
   public void testClusterStats() throws Exception {
     testClusterStats(1);
   }
@@ -525,7 +525,7 @@ public void testClusterStats() throws Exception {
    * Tests cluster storage statistics during decommissioning for
    * federated cluster
    */
-  @Test(timeout = 360000)
+  @Test
   public void testClusterStatsFederation() throws Exception {
     testClusterStats(3);
   }
@@ -575,7 +575,7 @@ private DataNode getDataNode(DatanodeInfo decomInfo) {
    * in the include file are allowed to connect to the namenode in a non
    * federated cluster.
    */
-  @Test(timeout = 360000)
+  @Test
   public void testHostsFile() throws IOException, InterruptedException {
     // Test for a single namenode cluster
     testHostsFile(1);
@@ -586,7 +586,7 @@ public void testHostsFile() throws IOException, InterruptedException {
    * in the include file are allowed to connect to the namenode in a
    * federated cluster.
    */
-  @Test(timeout = 360000)
+  @Test
   public void testHostsFileFederation()
       throws IOException, InterruptedException {
     // Test for 3 namenode federated cluster
@@ -624,7 +624,7 @@ public void testHostsFile(int numNameNodes) throws IOException,
     }
   }
 
-  @Test(timeout = 120000)
+  @Test
   public void testDecommissionWithOpenfile()
       throws IOException, InterruptedException {
     LOG.info("Starting test testDecommissionWithOpenfile");
@@ -676,7 +676,7 @@ public void testDecommissionWithOpenfile()
     fdos.close();
   }
 
-  @Test(timeout = 20000)
+  @Test
   public void testDecommissionWithUnknownBlock() throws IOException {
     startCluster(1, 3);
 
@@ -795,7 +795,7 @@ public Boolean get() {
     }
   }
 
-  @Test(timeout = 180000)
+  @Test
   public void testDecommissionWithOpenfileReporting()
       throws Exception {
     LOG.info("Starting test testDecommissionWithOpenfileReporting");
@@ -901,7 +901,7 @@ public void run() {
    * 2. close file with decommissioning
    * @throws Exception
    */
-  @Test(timeout = 360000)
+  @Test
   public void testDecommissionWithCloseFileAndListOpenFiles()
       throws Exception {
     LOG.info("Starting test testDecommissionWithCloseFileAndListOpenFiles");
@@ -958,7 +958,7 @@ public void testDecommissionWithCloseFileAndListOpenFiles()
     fileSys.delete(file, false);
   }
 
-  @Test(timeout = 360000)
+  @Test
   public void testDecommissionWithOpenFileAndBlockRecovery()
       throws IOException, InterruptedException {
     startCluster(1, 6);
@@ -1005,7 +1005,7 @@ public void testDecommissionWithOpenFileAndBlockRecovery()
     assertEquals(dfs.getFileStatus(file).getLen(), writtenBytes);
   }
 
-  @Test(timeout = 120000)
+  @Test
   public void testCloseWhileDecommission() throws IOException,
       ExecutionException, InterruptedException {
     LOG.info("Starting test testCloseWhileDecommission");
@@ -1064,7 +1064,7 @@ public void testCloseWhileDecommission() throws IOException,
    * to the IBR, all three nodes dn1/dn2/dn3 enter Decommissioning and then the
    * DN reports the IBR.
    */
-  @Test(timeout = 120000)
+  @Test
   public void testAllocAndIBRWhileDecommission() throws IOException {
     LOG.info("Starting test testAllocAndIBRWhileDecommission");
     getConf().setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,
@@ -1149,7 +1149,7 @@ public void testAllocAndIBRWhileDecommission() throws IOException {
   /**
    * Tests restart of namenode while datanode hosts are added to exclude file
    **/
-  @Test(timeout = 360000)
+  @Test
   public void testDecommissionWithNamenodeRestart()
       throws IOException, InterruptedException {
     LOG.info("Starting test testDecommissionWithNamenodeRestart");
@@ -1201,7 +1201,7 @@ public void testDecommissionWithNamenodeRestart()
   /**
    * Tests dead node count after restart of namenode
    **/
-  @Test(timeout = 360000)
+  @Test
   public void testDeadNodeCountAfterNamenodeRestart() throws Exception {
     LOG.info("Starting test testDeadNodeCountAfterNamenodeRestart");
     int numNamenodes = 1;
@@ -1248,7 +1248,7 @@ public void testDeadNodeCountAfterNamenodeRestart()throws Exception {
    * valid DNS hostname for the DataNode. See HDFS-5237 for background.
    */
   @Ignore
-  @Test(timeout = 360000)
+  @Test
   public void testIncludeByRegistrationName() throws Exception {
     // Any IPv4 address starting with 127 functions as a "loopback" address
     // which is connected to the current host. So by choosing 127.0.0.100
@@ -1314,7 +1314,7 @@ public Boolean get() {
     }, 500, 5000);
   }
 
-  @Test(timeout = 120000)
+  @Test
   public void testBlocksPerInterval() throws Exception {
     GenericTestUtils.setLogLevel(
         LoggerFactory.getLogger(DatanodeAdminManager.class), Level.TRACE);
@@ -1369,7 +1369,7 @@ private void doDecomCheck(DatanodeManager datanodeManager,
   /**
    * Test DatanodeAdminManager#monitor can swallow any exceptions by default.
    */
-  @Test(timeout = 120000)
+  @Test
   public void testPendingNodeButDecommissioned() throws Exception {
     // Only allow one node to be decom'd at a time
     getConf().setInt(
@@ -1416,7 +1416,7 @@ public void testPendingNodeButDecommissioned() throws Exception {
     }
   }
 
-  @Test(timeout = 120000)
+  @Test
   public void testPendingNodes() throws Exception {
     GenericTestUtils.setLogLevel(
         LoggerFactory.getLogger(DatanodeAdminManager.class), Level.TRACE);
@@ -1639,7 +1639,7 @@ public void testUsedCapacity() throws Exception {
   /**
    * Verify if multiple DataNodes can be decommission at the same time.
    */
-  @Test(timeout = 360000)
+  @Test
   public void testMultipleNodesDecommission() throws Exception {
     startCluster(1, 5);
     final Path file = new Path("/testMultipleNodesDecommission.dat");
@@ -1685,7 +1685,7 @@ public Boolean get() {
    * Force the tracked nodes set to be filled with nodes lost while decommissioning,
    * then decommission healthy nodes & validate they are decommissioned eventually.
    */
-  @Test(timeout = 120000)
+  @Test
   public void testRequeueUnhealthyDecommissioningNodes() throws Exception {
     // Create a MiniDFSCluster with 3 live datanode in AdminState=NORMAL and
     // 2 dead datanodes in AdminState=DECOMMISSION_INPROGRESS and a file
@@ -1911,7 +1911,8 @@ private void createClusterWithDeadNodesDecommissionInProgress(final int numLiveN
      under-replicated block can be replicated to sufficient datanodes & the decommissioning
      node can be decommissioned.
    */
-  @Test(timeout = 60000)
+  @SuppressWarnings("checkstyle:methodlength")
+  @Test
   public void testDeleteCorruptReplicaForUnderReplicatedBlock() throws Exception {
     // Constants
     final Path file = new Path("/test-file");
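
Every hunk in this diff drops the timeout argument from a JUnit 4 @Test annotation while leaving the test method itself unchanged. As a point of reference only, the sketch below shows one common way to keep an upper bound on test runtime after such a change: a single class-level org.junit.rules.Timeout rule. This is an illustrative assumption, not part of the change; the class name, test name, and the 360-second bound are hypothetical, and the actual patch may instead rely on a timeout rule inherited from the test's base class or on a build-level timeout.

import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;

// Hypothetical example class; not part of the diff above.
public class TimeoutRuleSketch {

  // One shared bound for every test in the class, standing in for the
  // per-method @Test(timeout = ...) arguments removed in the diff.
  @Rule
  public Timeout globalTimeout = Timeout.seconds(360);

  @Test
  public void sampleTest() throws Exception {
    // Test body runs as usual; the rule fails the test if it exceeds
    // the bound instead of letting it hang indefinitely.
  }
}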