From 7008d502260e420db6fee53208d50ca907625a78 Mon Sep 17 00:00:00 2001 From: Clara Xiong Date: Mon, 12 Jul 2021 15:45:11 -0700 Subject: [PATCH 1/4] HBASE-25739 TableSkewCostFunction need to use aggregated deviation - backport 2.3 --- .../master/balancer/BaseLoadBalancer.java | 57 +++++++++++------ .../master/balancer/DoubleArrayCost.java | 53 ++++++++++------ .../balancer/StochasticLoadBalancer.java | 63 ++++++++++--------- .../master/balancer/BalancerTestBase.java | 1 + .../master/balancer/TestBaseLoadBalancer.java | 4 +- ...tStochasticLoadBalancerBalanceCluster.java | 3 +- ...ochasticLoadBalancerHeterogeneousCost.java | 1 - ...estStochasticLoadBalancerLargeCluster.java | 3 + 8 files changed, 109 insertions(+), 76 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java index b9585b55b294..88b9063486bb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java @@ -157,7 +157,12 @@ protected static class Cluster { int[] regionIndexToServerIndex; //regionIndex -> serverIndex int[] initialRegionIndexToServerIndex; //regionIndex -> serverIndex (initial cluster state) int[] regionIndexToTableIndex; //regionIndex -> tableIndex - int[][] numRegionsPerServerPerTable; //serverIndex -> tableIndex -> # regions + int[][] numRegionsPerServerPerTable; // serverIndex -> tableIndex -> # regions + int[] numRegionsPerTable; // tableIndex -> region count + double[] meanRegionsPerTable; // mean region count per table + double[] regionSkewByTable; // skew on RS per by table + double[] minRegionSkewByTable; // min skew on RS per by table + double[] maxRegionSkewByTable; // max skew on RS per by table int[] numMaxRegionsPerTable; //tableIndex -> max number of regions in a single RS int[] regionIndexToPrimaryIndex; //regionIndex -> regionIndex of the primary boolean hasRegionReplicas = false; //whether there is regions with replicas @@ -365,7 +370,9 @@ protected Cluster( } numTables = tables.size(); + LOG.debug("Number of tables={}", numTables); numRegionsPerServerPerTable = new int[numServers][numTables]; + numRegionsPerTable = new int[numTables]; for (int i = 0; i < numServers; i++) { for (int j = 0; j < numTables; j++) { @@ -376,15 +383,26 @@ protected Cluster( for (int i=0; i < regionIndexToServerIndex.length; i++) { if (regionIndexToServerIndex[i] >= 0) { numRegionsPerServerPerTable[regionIndexToServerIndex[i]][regionIndexToTableIndex[i]]++; + numRegionsPerTable[regionIndexToTableIndex[i]]++; } } - numMaxRegionsPerTable = new int[numTables]; + // Avoid repeated computation for planning + meanRegionsPerTable = new double[numTables]; + regionSkewByTable = new double[numTables]; + maxRegionSkewByTable = new double[numTables]; + minRegionSkewByTable = new double[numTables]; + + for (int i = 0; i < numTables; i++) { + meanRegionsPerTable[i] = Double.valueOf(numRegionsPerTable[i]) / numServers; + minRegionSkewByTable[i] += DoubleArrayCost.getMinSkew(numRegionsPerTable[i], numServers); + maxRegionSkewByTable[i] += DoubleArrayCost.getMaxSkew(numRegionsPerTable[i], numServers); + } + for (int[] aNumRegionsPerServerPerTable : numRegionsPerServerPerTable) { - for (tableIndex = 0; tableIndex < aNumRegionsPerServerPerTable.length; tableIndex++) { - if (aNumRegionsPerServerPerTable[tableIndex] > numMaxRegionsPerTable[tableIndex]) { - 
numMaxRegionsPerTable[tableIndex] = aNumRegionsPerServerPerTable[tableIndex]; - } + for (int tableIdx = 0; tableIdx < aNumRegionsPerServerPerTable.length; tableIdx++) { + regionSkewByTable[tableIdx] += + Math.abs(aNumRegionsPerServerPerTable[tableIdx] - meanRegionsPerTable[tableIdx]); } } @@ -824,22 +842,13 @@ void regionMoved(int region, int oldServer, int newServer) { int tableIndex = regionIndexToTableIndex[region]; if (oldServer >= 0) { numRegionsPerServerPerTable[oldServer][tableIndex]--; + // update regionSkewPerTable for the move from old server + regionSkewByTable[tableIndex] += getSkewChangeFor(oldServer, tableIndex, -1); } numRegionsPerServerPerTable[newServer][tableIndex]++; - //check whether this caused maxRegionsPerTable in the new Server to be updated - if (numRegionsPerServerPerTable[newServer][tableIndex] > numMaxRegionsPerTable[tableIndex]) { - numMaxRegionsPerTable[tableIndex] = numRegionsPerServerPerTable[newServer][tableIndex]; - } else if (oldServer >= 0 && (numRegionsPerServerPerTable[oldServer][tableIndex] + 1) - == numMaxRegionsPerTable[tableIndex]) { - //recompute maxRegionsPerTable since the previous value was coming from the old server - numMaxRegionsPerTable[tableIndex] = 0; - for (int[] aNumRegionsPerServerPerTable : numRegionsPerServerPerTable) { - if (aNumRegionsPerServerPerTable[tableIndex] > numMaxRegionsPerTable[tableIndex]) { - numMaxRegionsPerTable[tableIndex] = aNumRegionsPerServerPerTable[tableIndex]; - } - } - } + // update regionSkewPerTable for the move to new server + regionSkewByTable[tableIndex] += getSkewChangeFor(newServer, tableIndex, 1); // update for servers int primary = regionIndexToPrimaryIndex[region]; @@ -1011,12 +1020,20 @@ public String toString() { .append(Arrays.toString(serverIndicesSortedByRegionCount)) .append(", regionsPerServer=").append(Arrays.deepToString(regionsPerServer)); - desc.append(", numMaxRegionsPerTable=").append(Arrays.toString(numMaxRegionsPerTable)) + desc.append(", regionSkewByTable=").append(Arrays.toString(regionSkewByTable)) .append(", numRegions=").append(numRegions).append(", numServers=").append(numServers) .append(", numTables=").append(numTables).append(", numMovedRegions=") .append(numMovedRegions).append('}'); return desc.toString(); } + + private double getSkewChangeFor(int serverIndex, int tableIndex, double regionCountChange) { + double curSkew = Math + .abs(numRegionsPerServerPerTable[serverIndex][tableIndex] - meanRegionsPerTable[tableIndex]); + double oldSkew = Math.abs( + numRegionsPerServerPerTable[serverIndex][tableIndex] - regionCountChange - meanRegionsPerTable[tableIndex]); + return curSkew - oldSkew; + } } // slop for regions diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/DoubleArrayCost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/DoubleArrayCost.java index 154adcd5d2ff..93a20b910b96 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/DoubleArrayCost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/DoubleArrayCost.java @@ -72,31 +72,14 @@ private static double computeCost(double[] stats) { double count = stats.length; double mean = total / count; - // Compute max as if all region servers had 0 and one had the sum of all costs. This must be - // a zero sum cost for this to make sense. 
- double max = ((count - 1) * mean) + (total - mean); - - // It's possible that there aren't enough regions to go around - double min; - if (count > total) { - min = ((count - total) * mean) + ((1 - mean) * total); - } else { - // Some will have 1 more than everything else. - int numHigh = (int) (total - (Math.floor(mean) * count)); - int numLow = (int) (count - numHigh); - - min = (numHigh * (Math.ceil(mean) - mean)) + (numLow * (mean - Math.floor(mean))); - - } - min = Math.max(0, min); for (int i = 0; i < stats.length; i++) { double n = stats[i]; double diff = Math.abs(mean - n); totalCost += diff; } - double scaled = StochasticLoadBalancer.scale(min, max, totalCost); - return scaled; + return StochasticLoadBalancer.scale(getMinSkew(total, count), + getMaxSkew(total, count), totalCost); } private static double getSum(double[] stats) { @@ -106,4 +89,34 @@ private static double getSum(double[] stats) { } return total; } -} \ No newline at end of file + + /** + * Return the min skew of distribution + * @param total is total number of regions + */ + public static double getMinSkew(double total, double numServers) { + double mean = total / numServers; + // It's possible that there aren't enough regions to go around + double min; + if (numServers > total) { + min = ((numServers - total) * mean + (1 - mean) * total) ; + } else { + // Some will have 1 more than everything else. + int numHigh = (int) (total - (Math.floor(mean) * numServers)); + int numLow = (int) (numServers - numHigh); + min = numHigh * (Math.ceil(mean) - mean) + numLow * (mean - Math.floor(mean)); + } + return min; + } + + /** + * Return the max deviation of distribution + * Compute max as if all region servers had 0 and one had the sum of all costs. This must be + * a zero sum cost for this to make sense. + * @param total is total number of regions + */ + public static double getMaxSkew(double total, double numServers) { + double mean = total / numServers; + return (total - mean) + (numServers - 1) * mean; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java index b51eb2613a84..b666d06d507e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java @@ -131,6 +131,7 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { protected static final Random RANDOM = new Random(System.currentTimeMillis()); private static final Logger LOG = LoggerFactory.getLogger(StochasticLoadBalancer.class); + public static final double COST_EPSILON = 0.0001; Map> loads = new HashMap<>(); @@ -140,7 +141,7 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { private int stepsPerRegion = 800; private long maxRunningTime = 30 * 1000 * 1; // 30 seconds. 
private int numRegionLoadsToRemember = 15; - private float minCostNeedBalance = 0.05f; + private float minCostNeedBalance = 0.025f; private List candidateGenerators; private List costFunctions; // FindBugs: Wants this protected; IS2_INCONSISTENT_SYNC @@ -215,9 +216,11 @@ public synchronized void setConf(Configuration conf) { curFunctionCosts = new double[costFunctions.size()]; tempFunctionCosts = new double[costFunctions.size()]; - LOG.info("Loaded config; maxSteps=" + maxSteps + ", stepsPerRegion=" + stepsPerRegion + - ", maxRunningTime=" + maxRunningTime + ", isByTable=" + isByTable + ", CostFunctions=" + - Arrays.toString(getCostFunctionNames()) + " etc."); + LOG.info( + "Loaded config; maxSteps=" + maxSteps + ", runMaxSteps=" + runMaxSteps + + ", stepsPerRegion=" + stepsPerRegion + + ", maxRunningTime=" + maxRunningTime + ", isByTable=" + isByTable + + ", CostFunctions=" + Arrays.toString(getCostFunctionNames()) + " etc."); } private void loadCustomCostFunctions(Configuration conf) { @@ -706,7 +709,6 @@ Cluster.Action generate(Cluster cluster) { * Base class of StochasticLoadBalancer's Cost Functions. */ public abstract static class CostFunction { - private float multiplier = 0; protected Cluster cluster; @@ -763,24 +765,6 @@ protected void regionMoved(int region, int oldServer, int newServer) { protected abstract double cost(); } - /** - * Scale the value between 0 and 1. - * @param min Min value - * @param max The Max value - * @param value The value to be scaled. - * @return The scaled value. - */ - static double scale(double min, double max, double value) { - if (max <= min || value <= min) { - return 0; - } - if ((max - min) == 0) { - return 0; - } - - return Math.max(0d, Math.min(1d, (value - min) / (max - min))); - } - /** * Given the starting state of the regions and a potential ending state * compute cost based upon the number of regions that have moved. @@ -970,15 +954,12 @@ static class TableSkewCostFunction extends CostFunction { @Override protected double cost() { - double max = cluster.numRegions; - double min = ((double) cluster.numRegions) / cluster.numServers; - double value = 0; - - for (int i = 0; i < cluster.numMaxRegionsPerTable.length; i++) { - value += cluster.numMaxRegionsPerTable[i]; + double cost = 0; + for (int tableIdx = 0; tableIdx < cluster.numTables; tableIdx++) { + cost += scale(cluster.minRegionSkewByTable[tableIdx], + cluster.maxRegionSkewByTable[tableIdx], cluster.regionSkewByTable[tableIdx]); } - - return scale(min, max, value); + return cost; } } @@ -1425,4 +1406,24 @@ protected double getCostFromRl(BalancerRegionLoad rl) { public static String composeAttributeName(String tableName, String costFunctionName) { return tableName + TABLE_FUNCTION_SEP + costFunctionName; } + + /** + * Scale the value between 0 and 1. + * @param min Min value + * @param max The Max value + * @param value The value to be scaled. + * @return The scaled value. 
+ * TBD: To be refactored to CostFunction when cost funtion is refactored out + */ + static double scale(double min, double max, double value) { + if (max <= min || value <= min + || Math.abs(max - min) <= COST_EPSILON || Math.abs(value - min) <= COST_EPSILON) { + return 0; + } + if (max <= min || Math.abs(max - min) <= COST_EPSILON) { + return 0; + } + + return Math.max(0d, Math.min(1d, (value - min) / (max - min))); + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java index b941dd5135dd..d1b33c57ba9d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java @@ -73,6 +73,7 @@ public static void beforeAllTests() throws Exception { conf.setFloat("hbase.regions.slop", 0.0f); conf.setFloat("hbase.master.balancer.stochastic.localityCost", 0); conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 3 * 60 * 1000); + conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", true); loadBalancer = new StochasticLoadBalancer(); loadBalancer.setConf(conf); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java index 794ffd3230b9..0621116a6ea2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java @@ -389,8 +389,8 @@ public void testRegionAvailabilityWithRegionMoves() throws Exception { // now move region1 from servers[0] to servers[2] cluster.doAction(new MoveRegionAction(0, 0, 2)); - // check that the numMaxRegionsPerTable for "table" has increased to 2 - assertEquals(2, cluster.numMaxRegionsPerTable[0]); + // check that the regionSkewByTable for "table" has increased to 2 + assertEquals(2, cluster.regionSkewByTable[0], 0.01); // now repeat check whether moving region1 from servers[1] to servers[2] // would lower availability assertTrue(cluster.wouldLowerAvailability(hri1, servers[2])); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerBalanceCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerBalanceCluster.java index 2f778c40eaee..98e5c59950c2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerBalanceCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerBalanceCluster.java @@ -51,8 +51,7 @@ public class TestStochasticLoadBalancerBalanceCluster extends BalancerTestBase { */ @Test public void testBalanceCluster() throws Exception { - conf.setLong(StochasticLoadBalancer.MAX_STEPS_KEY, 2000000L); - conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 90 * 1000); // 90 sec + conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 3 * 60 * 1000); // 3 min conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 1.0f); loadBalancer.setConf(conf); for (int[] mockCluster : clusterStateMocks) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerHeterogeneousCost.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerHeterogeneousCost.java index 066e22a9246e..d472868514d4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerHeterogeneousCost.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerHeterogeneousCost.java @@ -64,7 +64,6 @@ public static void beforeAllTests() throws IOException { BalancerTestBase.conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 0); BalancerTestBase.conf.setFloat("hbase.master.balancer.stochastic.primaryRegionCountCost", 0); BalancerTestBase.conf.setFloat("hbase.master.balancer.stochastic.tableSkewCost", 0); - BalancerTestBase.conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", true); BalancerTestBase.conf.set(StochasticLoadBalancer.COST_FUNCTIONS_COST_FUNCTIONS_KEY, HeterogeneousRegionCountCostFunction.class.getName()); // Need to ensure test dir has been created. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerLargeCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerLargeCluster.java index da38187cce67..e31cf132bce3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerLargeCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerLargeCluster.java @@ -38,6 +38,9 @@ public void testLargeCluster() { int numRegionsPerServer = 80; // all servers except one int numTables = 100; int replication = 1; + conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 6 * 60 * 1000); + conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 1.0f); + loadBalancer.onConfigurationChange(conf); testWithCluster(numNodes, numRegions, numRegionsPerServer, replication, numTables, true, true); } } From 6ef3d94ec73f3dfcdf4c22f19ac9229c9c884761 Mon Sep 17 00:00:00 2001 From: Clara Xiong Date: Tue, 13 Jul 2021 16:53:08 -0700 Subject: [PATCH 2/4] precommit check fix --- .../hadoop/hbase/master/balancer/BaseLoadBalancer.java | 9 ++++----- .../hbase/master/balancer/StochasticLoadBalancer.java | 1 - 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java index 88b9063486bb..ee5e9073a7f8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java @@ -163,7 +163,6 @@ protected static class Cluster { double[] regionSkewByTable; // skew on RS per by table double[] minRegionSkewByTable; // min skew on RS per by table double[] maxRegionSkewByTable; // max skew on RS per by table - int[] numMaxRegionsPerTable; //tableIndex -> max number of regions in a single RS int[] regionIndexToPrimaryIndex; //regionIndex -> regionIndex of the primary boolean hasRegionReplicas = false; //whether there is regions with replicas @@ -1028,10 +1027,10 @@ public String toString() { } private double getSkewChangeFor(int serverIndex, int tableIndex, double regionCountChange) { - double curSkew = Math - .abs(numRegionsPerServerPerTable[serverIndex][tableIndex] - meanRegionsPerTable[tableIndex]); - double oldSkew = Math.abs( - 
numRegionsPerServerPerTable[serverIndex][tableIndex] - regionCountChange - meanRegionsPerTable[tableIndex]); + double curSkew = Math.abs(numRegionsPerServerPerTable[serverIndex][tableIndex] - + meanRegionsPerTable[tableIndex]); + double oldSkew = Math.abs(numRegionsPerServerPerTable[serverIndex][tableIndex] - + regionCountChange - meanRegionsPerTable[tableIndex]); return curSkew - oldSkew; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java index b666d06d507e..a7ccc411bb18 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java @@ -1413,7 +1413,6 @@ public static String composeAttributeName(String tableName, String costFunctionN * @param max The Max value * @param value The value to be scaled. * @return The scaled value. - * TBD: To be refactored to CostFunction when cost funtion is refactored out */ static double scale(double min, double max, double value) { if (max <= min || value <= min From cc1342e7112d3f34abb449104e4d979803891e48 Mon Sep 17 00:00:00 2001 From: Clara Xiong Date: Tue, 13 Jul 2021 21:47:15 -0700 Subject: [PATCH 3/4] merge conflict --- .../balancer/TestStochasticLoadBalancerBalanceCluster.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerBalanceCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerBalanceCluster.java index 98e5c59950c2..974e94218ffc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerBalanceCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerBalanceCluster.java @@ -51,14 +51,13 @@ public class TestStochasticLoadBalancerBalanceCluster extends BalancerTestBase { */ @Test public void testBalanceCluster() throws Exception { - conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 3 * 60 * 1000); // 3 min + conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 800 * 1000); // 800 sec conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 1.0f); loadBalancer.setConf(conf); for (int[] mockCluster : clusterStateMocks) { Map> servers = mockClusterServers(mockCluster); List list = convertToList(servers); LOG.info("Mock Cluster : " + printMock(list) + " " + printStats(list)); - Map>> LoadOfAllTable = (Map) mockClusterServersWithTables(servers); List plans = loadBalancer.balanceCluster(LoadOfAllTable); From aa91a799b027268a2892eb44e864b3b42fddce72 Mon Sep 17 00:00:00 2001 From: Clara Xiong Date: Thu, 15 Jul 2021 12:19:38 -0700 Subject: [PATCH 4/4] test config --- .../apache/hadoop/hbase/master/balancer/BalancerTestBase.java | 2 -- .../balancer/TestStochasticLoadBalancerBalanceCluster.java | 3 ++- .../balancer/TestStochasticLoadBalancerLargeCluster.java | 2 +- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java index d1b33c57ba9d..685088fed2c9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java @@ -72,8 +72,6 @@ public static void beforeAllTests() throws Exception { conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 0.75f); conf.setFloat("hbase.regions.slop", 0.0f); conf.setFloat("hbase.master.balancer.stochastic.localityCost", 0); - conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 3 * 60 * 1000); - conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", true); loadBalancer = new StochasticLoadBalancer(); loadBalancer.setConf(conf); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerBalanceCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerBalanceCluster.java index 974e94218ffc..be7eecca7815 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerBalanceCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerBalanceCluster.java @@ -51,8 +51,9 @@ public class TestStochasticLoadBalancerBalanceCluster extends BalancerTestBase { */ @Test public void testBalanceCluster() throws Exception { - conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 800 * 1000); // 800 sec + conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 3 * 60 * 1000); // 800 sec conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 1.0f); + conf.setLong(StochasticLoadBalancer.MAX_STEPS_KEY, 20000000L); loadBalancer.setConf(conf); for (int[] mockCluster : clusterStateMocks) { Map> servers = mockClusterServers(mockCluster); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerLargeCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerLargeCluster.java index e31cf132bce3..7732c78fb7db 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerLargeCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerLargeCluster.java @@ -39,7 +39,7 @@ public void testLargeCluster() { int numTables = 100; int replication = 1; conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 6 * 60 * 1000); - conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 1.0f); + conf.setLong(StochasticLoadBalancer.MAX_STEPS_KEY, 20000000L); loadBalancer.onConfigurationChange(conf); testWithCluster(numNodes, numRegions, numRegionsPerServer, replication, numTables, true, true); }
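
Notes on the skew math this series introduces (illustrative sketches only, not part of the patches). The new DoubleArrayCost.getMinSkew/getMaxSkew compute, for a table with `total` regions spread over `numServers` servers, the best-case and worst-case values of the aggregated deviation sum(|regionsOnServer - mean|). The standalone class below reproduces those two formulas and evaluates them for a couple of small layouts; the class name, method names, and the main() driver are stand-ins, not HBase API.

public final class SkewBoundsSketch {

  /** Smallest possible sum of |regionsOnServer - mean| for `total` regions on `servers` servers. */
  static double minSkew(double total, double servers) {
    double mean = total / servers;
    if (servers > total) {
      // Not enough regions to go around: `total` servers hold 1 region each, the rest hold 0.
      return (servers - total) * mean + (1 - mean) * total;
    }
    // Otherwise some servers hold ceil(mean) regions and the rest hold floor(mean).
    int numHigh = (int) (total - Math.floor(mean) * servers);
    int numLow = (int) (servers - numHigh);
    return numHigh * (Math.ceil(mean) - mean) + numLow * (mean - Math.floor(mean));
  }

  /** Largest possible sum of deviations: one server holds every region, the others hold none. */
  static double maxSkew(double total, double servers) {
    double mean = total / servers;
    return (total - mean) + (servers - 1) * mean;
  }

  public static void main(String[] args) {
    // 10 regions on 4 servers: best layout 3,3,2,2 -> deviation 4 * 0.5 = 2; worst 10,0,0,0 -> 15.
    System.out.println(minSkew(10, 4)); // 2.0
    System.out.println(maxSkew(10, 4)); // 15.0
    // 3 regions on 5 servers: best layout 1,1,1,0,0 -> 3 * 0.4 + 2 * 0.6 = 2.4 (modulo rounding).
    System.out.println(minSkew(3, 5));
  }
}

Because both bounds depend only on the region count and server count, the patch can precompute them once per table in the Cluster constructor instead of rederiving them inside every cost evaluation.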
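
The other half of the change is how Cluster.regionMoved keeps regionSkewByTable current. Rather than rescanning every server to recompute a per-table maximum after each candidate move (the old numMaxRegionsPerTable behaviour), it adjusts the running deviation sum by the before/after deviation of just the two servers touched, which is what getSkewChangeFor returns. The sketch below shows that O(1) update for a single table and checks it against the definition; it is a simplified model under assumed names, not the balancer's data structures.

import java.util.Arrays;

public final class IncrementalSkewSketch {
  final int[] regionsPerServer;   // regions of one table, indexed by server
  final double mean;              // fixed: totalRegions / numServers
  double skew;                    // running sum of |regionsPerServer[i] - mean|

  IncrementalSkewSketch(int[] regionsPerServer) {
    this.regionsPerServer = regionsPerServer.clone();
    this.mean = Arrays.stream(regionsPerServer).sum() / (double) regionsPerServer.length;
    this.skew = Arrays.stream(regionsPerServer).mapToDouble(c -> Math.abs(c - mean)).sum();
  }

  /** Apply a move of one region from oldServer to newServer, updating the skew in O(1). */
  void regionMoved(int oldServer, int newServer) {
    regionsPerServer[oldServer]--;
    skew += skewChange(oldServer, -1);
    regionsPerServer[newServer]++;
    skew += skewChange(newServer, +1);
  }

  /** New deviation minus old deviation for one server, mirroring getSkewChangeFor(). */
  private double skewChange(int server, int countChange) {
    double cur = Math.abs(regionsPerServer[server] - mean);
    double old = Math.abs(regionsPerServer[server] - countChange - mean);
    return cur - old;
  }

  public static void main(String[] args) {
    IncrementalSkewSketch s = new IncrementalSkewSketch(new int[] {4, 1, 1}); // mean 2, skew 4
    s.regionMoved(0, 2); // counts become 3,1,2 -> skew |3-2| + |1-2| + |2-2| = 2
    System.out.println(s.skew); // 2.0
  }
}

This matters because the stochastic balancer evaluates thousands of candidate moves per run; a constant-time delta keeps TableSkewCostFunction from turning each step into a full per-table scan.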
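
Finally, the reworked TableSkewCostFunction turns the per-table deviation into a cost by scaling each table's aggregated skew into [0, 1] between its precomputed best-case and worst-case skew, then summing across tables. The sketch below condenses the patch's scale() guards (including the COST_EPSILON check) into a single condition and uses made-up array names; treat it as a rough model of the aggregation, not the actual cost-function code.

public final class TableSkewCostSketch {
  static final double COST_EPSILON = 0.0001;

  /** Scale value into [0, 1] relative to [min, max]; near-degenerate ranges count as zero cost. */
  static double scale(double min, double max, double value) {
    if (max <= min || value <= min
        || Math.abs(max - min) <= COST_EPSILON || Math.abs(value - min) <= COST_EPSILON) {
      return 0;
    }
    return Math.max(0d, Math.min(1d, (value - min) / (max - min)));
  }

  /** Sum of per-table scaled skews, analogous to the new TableSkewCostFunction.cost(). */
  static double tableSkewCost(double[] minSkewByTable, double[] maxSkewByTable,
      double[] skewByTable) {
    double cost = 0;
    for (int t = 0; t < skewByTable.length; t++) {
      cost += scale(minSkewByTable[t], maxSkewByTable[t], skewByTable[t]);
    }
    return cost;
  }

  public static void main(String[] args) {
    // Two tables: the first sits at its minimum skew (cost 0), the second is halfway between
    // best and worst (cost 0.5), so the aggregate table-skew cost is 0.5.
    System.out.println(tableSkewCost(
        new double[] {2.0, 2.0}, new double[] {15.0, 15.0}, new double[] {2.0, 8.5}));
  }
}

Note that each table contributes at most 1 to this sum, so unlike the old max-based formulation the aggregate can exceed 1 when many tables are skewed; the cost function's multiplier and the comparison against minCostNeedBalance are what keep that in proportion.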