@@ -76,14 +76,16 @@ public void testFullRollingRestart() throws Exception {
         internalCluster().startNode(settings);

         // make sure the cluster state is green, and all has been recovered
-        assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout).setWaitForGreenStatus().setWaitForNoRelocatingShards(true).setWaitForNodes("3"));
+        assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout)
+            .setWaitForGreenStatus().setWaitForNoRelocatingShards(true).setWaitForNodes("3"));

         logger.info("--> add two more nodes");
         internalCluster().startNode(settings);
         internalCluster().startNode(settings);

         // make sure the cluster state is green, and all has been recovered
-        assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout).setWaitForGreenStatus().setWaitForNoRelocatingShards(true).setWaitForNodes("5"));
+        assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout)
+            .setWaitForGreenStatus().setWaitForNoRelocatingShards(true).setWaitForNodes("5"));

         logger.info("--> refreshing and checking data");
         refresh();
@@ -94,11 +96,13 @@ public void testFullRollingRestart() throws Exception {
         // now start shutting nodes down
         internalCluster().stopRandomDataNode();
         // make sure the cluster state is green, and all has been recovered
-        assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout).setWaitForGreenStatus().setWaitForNoRelocatingShards(true).setWaitForNodes("4"));
+        assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout)
+            .setWaitForGreenStatus().setWaitForNoRelocatingShards(true).setWaitForNodes("4"));

         internalCluster().stopRandomDataNode();
         // make sure the cluster state is green, and all has been recovered
-        assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout).setWaitForGreenStatus().setWaitForNoRelocatingShards(true).setWaitForNodes("3"));
+        assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout)
+            .setWaitForGreenStatus().setWaitForNoRelocatingShards(true).setWaitForNodes("3"));

         logger.info("--> stopped two nodes, verifying data");
         refresh();
@@ -109,12 +113,14 @@ public void testFullRollingRestart() throws Exception {
         // closing the 3rd node
         internalCluster().stopRandomDataNode();
         // make sure the cluster state is green, and all has been recovered
-        assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout).setWaitForGreenStatus().setWaitForNoRelocatingShards(true).setWaitForNodes("2"));
+        assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout)
+            .setWaitForGreenStatus().setWaitForNoRelocatingShards(true).setWaitForNodes("2"));

         internalCluster().stopRandomDataNode();

         // make sure the cluster state is yellow, and all has been recovered
-        assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout).setWaitForYellowStatus().setWaitForNoRelocatingShards(true).setWaitForNodes("1"));
+        assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout)
+            .setWaitForYellowStatus().setWaitForNoRelocatingShards(true).setWaitForNodes("1"));

         logger.info("--> one node left, verifying data");
         refresh();
@@ -133,7 +139,9 @@ public void testNoRebalanceOnRollingRestart() throws Exception {
          * to relocating to the restarting node since all had 2 shards and now one node has nothing allocated.
          * We have a fix for this to wait until we have allocated unallocated shards now so this shouldn't happen.
          */
-        prepareCreate("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "6").put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0").put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueMinutes(1))).get();
+        prepareCreate("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "6")
+            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0")
+            .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueMinutes(1))).get();

         for (int i = 0; i < 100; i++) {
             client().prepareIndex("test", "type1", Long.toString(i))
@@ -152,7 +160,8 @@ public void testNoRebalanceOnRollingRestart() throws Exception {

         recoveryResponse = client().admin().indices().prepareRecoveries("test").get();
         for (RecoveryState recoveryState : recoveryResponse.shardRecoveryStates().get("test")) {
-            assertTrue("relocated from: " + recoveryState.getSourceNode() + " to: " + recoveryState.getTargetNode() + "-- \nbefore: \n" + state,
+            assertTrue("relocated from: " + recoveryState.getSourceNode() + " to: " +
+                recoveryState.getTargetNode() + "-- \nbefore: \n" + state,
                 recoveryState.getRecoverySource().getType() != RecoverySource.Type.PEER || recoveryState.getPrimary() == false);
         }
     }
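
Each of the wrapped calls above hands a prepared cluster-health request to the test's `assertTimeout` helper. The helper's body is not shown in this diff; a minimal sketch of what it presumably does, assuming the `ClusterHealthRequestBuilder` and `ClusterHealthResponse` types from the Elasticsearch client API (the class and method shown here are illustrative, not the actual implementation):

```java
import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequestBuilder;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;

import static org.junit.Assert.fail;

// Hypothetical sketch of the assertTimeout(...) helper used in the test above.
class ClusterHealthAssertions {
    // Execute the prepared health request and fail the test if it timed out before
    // the requested state (node count, green/yellow status, no relocating shards)
    // was reached.
    static void assertTimeout(ClusterHealthRequestBuilder requestBuilder) {
        ClusterHealthResponse health = requestBuilder.get();
        if (health.isTimedOut()) {
            fail("cluster health request timed out:\n" + health);
        }
    }
}
```

Structured this way, the wait-for conditions (`setWaitForNodes`, `setWaitForGreenStatus`, `setWaitForNoRelocatingShards`) stay in one chained expression at each restart step, while the helper turns a timed-out wait into an immediate, descriptive test failure.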