diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BalancerClusterState.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BalancerClusterState.java
index 4b3809c107cb..d60a29a6a70d 100644
--- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BalancerClusterState.java
+++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BalancerClusterState.java
@@ -106,6 +106,7 @@ class BalancerClusterState {
   int numRacks;
   int numTables;
   int numRegions;
+  int maxReplicas = 1;
 
   int numMovedRegions = 0; // num moved regions from the initial configuration
   Map<ServerName, List<RegionInfo>> clusterState;
@@ -446,6 +447,11 @@ private void registerRegion(RegionInfo region, int regionIndex, int serverIndex,
             : serversToIndex.get(loc.get(i).getAddress()));
       }
     }
+
+    int numReplicas = region.getReplicaId() + 1;
+    if (numReplicas > maxReplicas) {
+      maxReplicas = numReplicas;
+    }
   }
 
   /**
diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/CacheAwareLoadBalancer.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/CacheAwareLoadBalancer.java
index bc31a317aebd..385ed564c7d6 100644
--- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/CacheAwareLoadBalancer.java
+++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/CacheAwareLoadBalancer.java
@@ -68,12 +68,13 @@ public synchronized void loadConf(Configuration configuration) {
   }
 
   @Override
-  protected List<CandidateGenerator> createCandidateGenerators() {
-    List<CandidateGenerator> candidateGenerators = new ArrayList<>(2);
-    candidateGenerators.add(GeneratorFunctionType.LOAD.ordinal(),
+  protected Map<Class<? extends CandidateGenerator>, CandidateGenerator>
+    createCandidateGenerators() {
+    Map<Class<? extends CandidateGenerator>, CandidateGenerator> candidateGenerators =
+      new HashMap<>(2);
+    candidateGenerators.put(CacheAwareSkewnessCandidateGenerator.class,
       new CacheAwareSkewnessCandidateGenerator());
-    candidateGenerators.add(GeneratorFunctionType.CACHE_RATIO.ordinal(),
-      new CacheAwareCandidateGenerator());
+    candidateGenerators.put(CacheAwareCandidateGenerator.class, new CacheAwareCandidateGenerator());
     return candidateGenerators;
   }
@@ -409,8 +410,9 @@ protected void regionMoved(int region, int oldServer, int newServer) {
       });
     }
 
-    public final void updateWeight(double[] weights) {
-      weights[GeneratorFunctionType.LOAD.ordinal()] += cost();
+    @Override
+    public final void updateWeight(Map<Class<? extends CandidateGenerator>, Double> weights) {
+      weights.merge(LoadCandidateGenerator.class, cost(), Double::sum);
     }
   }
@@ -478,8 +480,8 @@ private int getServerWithBestCacheRatioForRegion(int region) {
     }
 
     @Override
-    public final void updateWeight(double[] weights) {
-      weights[GeneratorFunctionType.CACHE_RATIO.ordinal()] += cost();
+    public void updateWeight(Map<Class<? extends CandidateGenerator>, Double> weights) {
+      weights.merge(LoadCandidateGenerator.class, cost(), Double::sum);
     }
   }
 }
diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/CostFunction.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/CostFunction.java
index 91e1ec61552c..1dcd4580b1a6 100644
--- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/CostFunction.java
+++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/CostFunction.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.master.balancer;
 
+import java.util.Map;
 import org.apache.yetus.audience.InterfaceAudience;
 
 /**
@@ -91,8 +92,8 @@ protected void regionMoved(int region, int oldServer, int newServer) {
    * Called once per init or after postAction.
    * @param weights the weights for every generator.
    */
-  public void updateWeight(double[] weights) {
-    weights[StochasticLoadBalancer.GeneratorType.RANDOM.ordinal()] += cost();
+  public void updateWeight(Map<Class<? extends CandidateGenerator>, Double> weights) {
+    weights.merge(RandomCandidateGenerator.class, cost(), Double::sum);
   }
 
   /**
diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.java
index fbe91a921daa..8668e7cae3c9 100644
--- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.java
+++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.java
@@ -80,17 +80,20 @@ public void setFavoredNodesManager(FavoredNodesManager fnm) {
   }
 
   @Override
-  protected List<CandidateGenerator> createCandidateGenerators() {
-    List<CandidateGenerator> fnPickers = new ArrayList<>(2);
-    fnPickers.add(new FavoredNodeLoadPicker());
-    fnPickers.add(new FavoredNodeLocalityPicker());
+  protected Map<Class<? extends CandidateGenerator>, CandidateGenerator>
+    createCandidateGenerators() {
+    Map<Class<? extends CandidateGenerator>, CandidateGenerator> fnPickers = new HashMap<>(2);
+    fnPickers.put(FavoredNodeLoadPicker.class, new FavoredNodeLoadPicker());
+    fnPickers.put(FavoredNodeLocalityPicker.class, new FavoredNodeLocalityPicker());
     return fnPickers;
   }
 
   /** Returns any candidate generator in random */
   @Override
   protected CandidateGenerator getRandomGenerator() {
-    return candidateGenerators.get(ThreadLocalRandom.current().nextInt(candidateGenerators.size()));
+    Class<? extends CandidateGenerator> clazz = shuffledGeneratorClasses.get()
+      .get(ThreadLocalRandom.current().nextInt(candidateGenerators.size()));
+    return candidateGenerators.get(clazz);
   }
 
   /**
diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/LocalityBasedCostFunction.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/LocalityBasedCostFunction.java
index 678c9a3e9adf..fc2d6cc12fb3 100644
--- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/LocalityBasedCostFunction.java
+++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/LocalityBasedCostFunction.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.master.balancer;
 
+import java.util.Map;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.master.balancer.BalancerClusterState.LocalityType;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -89,7 +90,7 @@ private double getWeightedLocality(int region, int entity) {
   }
 
   @Override
-  public final void updateWeight(double[] weights) {
-    weights[StochasticLoadBalancer.GeneratorType.LOCALITY.ordinal()] += cost();
+  public final void updateWeight(Map<Class<? extends CandidateGenerator>, Double> weights) {
+    weights.merge(LocalityBasedCandidateGenerator.class, cost(), Double::sum);
   }
 }
diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionCountSkewCostFunction.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionCountSkewCostFunction.java
index 442bbc9b7bcf..669f1df8f4fd 100644
--- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionCountSkewCostFunction.java
+++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionCountSkewCostFunction.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.master.balancer;
 
+import java.util.Map;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.yetus.audience.InterfaceAudience;
 
@@ -61,7 +62,7 @@ protected void regionMoved(int region, int oldServer, int newServer) {
   }
 
   @Override
-  public final void updateWeight(double[] weights) {
-    weights[StochasticLoadBalancer.GeneratorType.LOAD.ordinal()] += cost();
+  public final void updateWeight(Map<Class<? extends CandidateGenerator>, Double> weights) {
+    weights.merge(LoadCandidateGenerator.class, cost(), Double::sum);
   }
 }
diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionReplicaGroupingCostFunction.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionReplicaGroupingCostFunction.java
index f482ada22b29..43eaceb4f775 100644
--- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionReplicaGroupingCostFunction.java
+++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionReplicaGroupingCostFunction.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.master.balancer;
 
+import java.util.Map;
 import java.util.concurrent.atomic.AtomicLong;
 import org.agrona.collections.Hashing;
 import org.agrona.collections.Int2IntCounterMap;
@@ -73,8 +74,8 @@ protected double cost() {
   }
 
   @Override
-  public final void updateWeight(double[] weights) {
-    weights[StochasticLoadBalancer.GeneratorType.RACK.ordinal()] += cost();
+  public final void updateWeight(Map<Class<? extends CandidateGenerator>, Double> weights) {
+    weights.merge(RegionReplicaRackCandidateGenerator.class, cost(), Double::sum);
   }
 
   /**
diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
index 8405130c1c4c..7b99928897a2 100644
--- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
+++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
@@ -22,11 +22,13 @@
 import java.util.ArrayDeque;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.Deque;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.TimeUnit;
 import java.util.function.Supplier;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ClusterMetrics;
@@ -48,6 +50,8 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hbase.thirdparty.com.google.common.base.Suppliers;
+
 /**
  * <p>
  * This is a best effort load balancer. Given a Cost function F(C) => x It will randomly try and
@@ -147,7 +151,6 @@ public class StochasticLoadBalancer extends BaseLoadBalancer {
   private double curOverallCost = 0d;
   private double[] tempFunctionCosts;
   private double[] curFunctionCosts;
-  private double[] weightsOfGenerators;
 
   // Keep locality based picker and cost function to alert them
   // when new services are offered
@@ -157,14 +160,15 @@ public class StochasticLoadBalancer extends BaseLoadBalancer {
   private RegionReplicaHostCostFunction regionReplicaHostCostFunction;
   private RegionReplicaRackCostFunction regionReplicaRackCostFunction;
 
-  protected List<CandidateGenerator> candidateGenerators;
-
-  public enum GeneratorType {
-    RANDOM,
-    LOAD,
-    LOCALITY,
-    RACK
-  }
+  private Map<Class<? extends CandidateGenerator>, Double> weightsOfGenerators;
+  protected Map<Class<? extends CandidateGenerator>, CandidateGenerator> candidateGenerators;
+  protected final Supplier<List<Class<? extends CandidateGenerator>>> shuffledGeneratorClasses =
+    Suppliers.memoizeWithExpiration(() -> {
+      List<Class<? extends CandidateGenerator>> shuffled =
+        new ArrayList<>(candidateGenerators.keySet());
+      Collections.shuffle(shuffled);
+      return shuffled;
+    }, 5, TimeUnit.SECONDS);
 
   /**
    * The constructor that pass a MetricsStochasticBalancer to BaseLoadBalancer to replace its
@@ -213,16 +217,20 @@ private void loadCustomCostFunctions(Configuration conf) {
 
   @RestrictedApi(explanation = "Should only be called in tests", link = "",
       allowedOnPath = ".*/src/test/.*")
-  List<CandidateGenerator> getCandidateGenerators() {
+  Map<Class<? extends CandidateGenerator>, CandidateGenerator> getCandidateGenerators() {
     return this.candidateGenerators;
   }
 
-  protected List<CandidateGenerator> createCandidateGenerators() {
-    List<CandidateGenerator> candidateGenerators = new ArrayList<CandidateGenerator>(4);
-    candidateGenerators.add(GeneratorType.RANDOM.ordinal(), new RandomCandidateGenerator());
-    candidateGenerators.add(GeneratorType.LOAD.ordinal(), new LoadCandidateGenerator());
-    candidateGenerators.add(GeneratorType.LOCALITY.ordinal(), localityCandidateGenerator);
-    candidateGenerators.add(GeneratorType.RACK.ordinal(),
+  protected Map<Class<? extends CandidateGenerator>, CandidateGenerator>
+    createCandidateGenerators() {
+    Map<Class<? extends CandidateGenerator>, CandidateGenerator> candidateGenerators =
+      new HashMap<>(5);
+    candidateGenerators.put(RandomCandidateGenerator.class, new RandomCandidateGenerator());
+    candidateGenerators.put(LoadCandidateGenerator.class, new LoadCandidateGenerator());
+    candidateGenerators.put(LocalityBasedCandidateGenerator.class, localityCandidateGenerator);
+    candidateGenerators.put(RegionReplicaCandidateGenerator.class,
+      new RegionReplicaCandidateGenerator());
+    candidateGenerators.put(RegionReplicaRackCandidateGenerator.class,
       new RegionReplicaRackCandidateGenerator());
     return candidateGenerators;
   }
@@ -331,10 +339,33 @@ void updateMetricsSize(int size) {
     }
   }
 
-  private boolean areSomeRegionReplicasColocated(BalancerClusterState c) {
-    regionReplicaHostCostFunction.prepare(c);
-    double cost = Math.abs(regionReplicaHostCostFunction.cost());
-    return cost > CostFunction.getCostEpsilon(cost);
+  private boolean areSomeRegionReplicasColocatedOnHost(BalancerClusterState c) {
+    if (c.numHosts >= c.maxReplicas) {
+      regionReplicaHostCostFunction.prepare(c);
+      double hostCost = Math.abs(regionReplicaHostCostFunction.cost());
+      boolean colocatedAtHost = hostCost > CostFunction.getCostEpsilon(hostCost);
+      if (colocatedAtHost) {
+        return true;
+      }
+      LOG.trace("No host colocation detected with host cost={}", hostCost);
+    }
+    return false;
+  }
+
+  private boolean areSomeRegionReplicasColocatedOnRack(BalancerClusterState c) {
+    if (c.numRacks >= c.maxReplicas) {
+      regionReplicaRackCostFunction.prepare(c);
+      double rackCost = Math.abs(regionReplicaRackCostFunction.cost());
+      boolean colocatedAtRack = rackCost > CostFunction.getCostEpsilon(rackCost);
+      if (colocatedAtRack) {
+        return true;
+      }
+      LOG.trace("No rack colocation detected with rack cost={}", rackCost);
+    } else {
+      LOG.trace("Rack colocation is inevitable with fewer racks than replicas, "
+        + "so we won't bother checking");
+    }
+    return false;
   }
 
   private String getBalanceReason(double total, double sumMultiplier) {
@@ -361,12 +392,19 @@ boolean needsBalance(TableName tableName, BalancerClusterState cluster) {
           + " < MIN_SERVER_BALANCE(" + MIN_SERVER_BALANCE + ")", null);
       return false;
     }
-    if (areSomeRegionReplicasColocated(cluster)) {
+
+    if (areSomeRegionReplicasColocatedOnHost(cluster)) {
       LOG.info("Running balancer because at least one server hosts replicas of the same region."
         + " function cost={}", functionCost());
       return true;
     }
 
+    if (areSomeRegionReplicasColocatedOnRack(cluster)) {
+      LOG.info("Running balancer because at least one rack hosts replicas of the same region."
+        + " function cost={}", functionCost());
+      return true;
+    }
+
     if (idleRegionServerExist(cluster)) {
       LOG.info("Running balancer because cluster has idle server(s)."
         + " function cost={}", functionCost());
@@ -409,8 +447,9 @@ boolean needsBalance(TableName tableName, BalancerClusterState cluster) {
 
   @RestrictedApi(explanation = "Should only be called in tests", link = "",
       allowedOnPath = ".*(/src/test/.*|StochasticLoadBalancer).java")
-  BalanceAction nextAction(BalancerClusterState cluster) {
-    return getRandomGenerator().generate(cluster);
+  Pair<CandidateGenerator, BalanceAction> nextAction(BalancerClusterState cluster) {
+    CandidateGenerator generator = getRandomGenerator();
+    return Pair.newPair(generator, generator.generate(cluster));
   }
 
   /**
@@ -419,24 +458,40 @@ BalanceAction nextAction(BalancerClusterState cluster) {
    * all cost functions that benefit from it.
    */
   protected CandidateGenerator getRandomGenerator() {
-    double sum = 0;
-    for (int i = 0; i < weightsOfGenerators.length; i++) {
-      sum += weightsOfGenerators[i];
-      weightsOfGenerators[i] = sum;
-    }
-    if (sum == 0) {
-      return candidateGenerators.get(0);
-    }
-    for (int i = 0; i < weightsOfGenerators.length; i++) {
-      weightsOfGenerators[i] /= sum;
+    List<Class<? extends CandidateGenerator>> generatorClasses = shuffledGeneratorClasses.get();
+    List<Double> partialSums = new ArrayList<>(generatorClasses.size());
+    double sum = 0.0;
+    for (Class<? extends CandidateGenerator> clazz : generatorClasses) {
+      double weight = weightsOfGenerators.getOrDefault(clazz, 0.0);
+      sum += weight;
+      partialSums.add(sum);
+    }
+
+    // If the sum of all weights is zero, fall back to any generator
+    if (sum == 0.0) {
+      // If no generators at all, fail fast or throw
+      if (generatorClasses.isEmpty()) {
+        throw new IllegalStateException("No candidate generators available");
+      }
+      return candidateGenerators
+        .get(generatorClasses.get(ThreadLocalRandom.current().nextInt(candidateGenerators.size())));
     }
+
     double rand = ThreadLocalRandom.current().nextDouble();
-    for (int i = 0; i < weightsOfGenerators.length; i++) {
-      if (rand <= weightsOfGenerators[i]) {
-        return candidateGenerators.get(i);
+    // Normalize partial sums so that the last one should be exactly 1.0
+    for (int i = 0; i < partialSums.size(); i++) {
+      partialSums.set(i, partialSums.get(i) / sum);
+    }
+
+    // Generate a random number and pick the first generator whose partial sum is >= rand
+    for (int i = 0; i < partialSums.size(); i++) {
+      if (rand <= partialSums.get(i)) {
+        return candidateGenerators.get(generatorClasses.get(i));
       }
     }
-    return candidateGenerators.get(candidateGenerators.size() - 1);
+
+    // Fallback: if for some reason we didn't return above, return the last generator
+    return candidateGenerators.get(generatorClasses.get(generatorClasses.size() - 1));
   }
 
   @RestrictedApi(explanation = "Should only be called in tests", link = "",
@@ -521,9 +576,12 @@ protected List<RegionPlan> balanceTable(TableName tableName,
     final String initFunctionTotalCosts = totalCostsPerFunc();
     // Perform a stochastic walk to see if we can get a good fit.
     long step;
-
+    Map<Class<? extends CandidateGenerator>, Long> generatorToStepCount = new HashMap<>();
+    Map<Class<? extends CandidateGenerator>, Long> generatorToApprovedActionCount = new HashMap<>();
     for (step = 0; step < computedMaxSteps; step++) {
-      BalanceAction action = nextAction(cluster);
+      Pair<CandidateGenerator, BalanceAction> nextAction = nextAction(cluster);
+      CandidateGenerator generator = nextAction.getFirst();
+      BalanceAction action = nextAction.getSecond();
 
       if (action.getType() == BalanceAction.Type.NULL) {
         continue;
@@ -531,12 +589,14 @@ protected List<RegionPlan> balanceTable(TableName tableName,
 
       cluster.doAction(action);
       updateCostsAndWeightsWithAction(cluster, action);
+      generatorToStepCount.merge(generator.getClass(), 1L, Long::sum);
 
       newCost = computeCost(cluster, currentCost);
 
       // Should this be kept?
       if (newCost < currentCost) {
         currentCost = newCost;
+        generatorToApprovedActionCount.merge(generator.getClass(), 1L, Long::sum);
 
         // save for JMX
         curOverallCost = currentCost;
@@ -555,6 +615,14 @@ protected List<RegionPlan> balanceTable(TableName tableName,
     }
     long endTime = EnvironmentEdgeManager.currentTime();
 
+    StringBuilder logMessage = new StringBuilder("CandidateGenerator activity summary:\n");
+    generatorToStepCount.forEach((generator, count) -> {
+      long approvals = generatorToApprovedActionCount.getOrDefault(generator, 0L);
+      logMessage.append(String.format(" - %s: %d steps, %d approvals%n", generator.getSimpleName(),
+        count, approvals));
+    });
+    LOG.info(logMessage.toString());
+
     metricsBalancer.balanceCluster(endTime - startTime);
 
     if (initCost > currentCost) {
@@ -748,7 +816,10 @@ private void updateRegionLoad() {
     allowedOnPath = ".*(/src/test/.*|StochasticLoadBalancer).java")
   void initCosts(BalancerClusterState cluster) {
     // Initialize the weights of generator every time
-    weightsOfGenerators = new double[this.candidateGenerators.size()];
+    weightsOfGenerators = new HashMap<>(this.candidateGenerators.size());
+    for (Class<? extends CandidateGenerator> clazz : candidateGenerators.keySet()) {
+      weightsOfGenerators.put(clazz, 0.0);
+    }
     for (CostFunction c : costFunctions) {
       c.prepare(cluster);
       c.updateWeight(weightsOfGenerators);
@@ -762,8 +833,8 @@ void initCosts(BalancerClusterState cluster) {
     allowedOnPath = ".*(/src/test/.*|StochasticLoadBalancer).java")
   void updateCostsAndWeightsWithAction(BalancerClusterState cluster, BalanceAction action) {
     // Reset all the weights to 0
-    for (int i = 0; i < weightsOfGenerators.length; i++) {
-      weightsOfGenerators[i] = 0;
+    for (Class<? extends CandidateGenerator> clazz : candidateGenerators.keySet()) {
+      weightsOfGenerators.put(clazz, 0.0);
     }
     for (CostFunction c : costFunctions) {
       if (c.isNeeded()) {
diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java
index cc16cfe2ec83..d20d003b5ff3 100644
--- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java
+++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java
@@ -488,7 +488,7 @@ public void testCostAfterUndoAction() {
     loadBalancer.initCosts(cluster);
     for (int i = 0; i != runs; ++i) {
       final double expectedCost = loadBalancer.computeCost(cluster, Double.MAX_VALUE);
-      BalanceAction action = loadBalancer.nextAction(cluster);
+      BalanceAction action = loadBalancer.nextAction(cluster).getSecond();
       cluster.doAction(action);
       loadBalancer.updateCostsAndWeightsWithAction(cluster, action);
       BalanceAction undoAction = action.undoAction();
diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerBalanceCluster.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerBalanceCluster.java
index 2b2c56aa99ae..50d1b0e84cf9 100644
--- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerBalanceCluster.java
+++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerBalanceCluster.java
@@ -52,7 +52,7 @@ public class TestStochasticLoadBalancerBalanceCluster extends StochasticBalancer
    */
   @Test
   public void testBalanceCluster() throws Exception {
-    setMaxRunTime(Duration.ofMillis(1500));
+    setMaxRunTime(Duration.ofMillis(2500));
     loadBalancer.onConfigurationChange(conf);
     for (int[] mockCluster : clusterStateMocks) {
       Map<ServerName, List<RegionInfo>> servers = mockClusterServers(mockCluster);
diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplica.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplica.java
index 9d873070a600..37870ee127bb 100644
--- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplica.java
+++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplica.java
@@ -41,6 +41,8 @@
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
+import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
+
 @Category({ MasterTests.class, LargeTests.class })
 public class TestStochasticLoadBalancerRegionReplica extends StochasticBalancerTestBase {
@@ -136,7 +138,7 @@ public void testReplicaCostForReplicas() {
   }
 
   @Test
-  public void testNeedsBalanceForColocatedReplicas() {
+  public void testNeedsBalanceForColocatedReplicasOnHost() {
     // check for the case where there are two hosts and with one rack, and where
     // both the replicas are hosted on the same server
     List<RegionInfo> regions = randomRegions(1);
@@ -148,20 +150,50 @@ public void testNeedsBalanceForColocatedReplicas() {
     // until the step above s1 holds two replicas of a region
     regions = randomRegions(1);
     map.put(s2, regions);
-    assertTrue(loadBalancer.needsBalance(HConstants.ENSEMBLE_TABLE_NAME,
-      new BalancerClusterState(map, null, null, null)));
-    // check for the case where there are two hosts on the same rack and there are two racks
-    // and both the replicas are on the same rack
-    map.clear();
-    regions = randomRegions(1);
+    BalancerClusterState cluster =
+      new BalancerClusterState(map, null, null, new ForTestRackManagerOne());
+    loadBalancer.initCosts(cluster);
+    assertTrue(loadBalancer.needsBalance(HConstants.ENSEMBLE_TABLE_NAME, cluster));
+  }
+
+  @Test
+  public void testNeedsBalanceForColocatedReplicasOnRack() {
+    // Three hosts, two racks, and two replicas for a region. This should be balanced
+    List<RegionInfo> regions = randomRegions(1);
+    ServerName s1 = ServerName.valueOf("host1", 1000, 11111);
+    ServerName s2 = ServerName.valueOf("host11", 1000, 11111);
+    Map<ServerName, List<RegionInfo>> map = new HashMap<>();
     List<RegionInfo> regionsOnS2 = new ArrayList<>(1);
     regionsOnS2.add(RegionReplicaUtil.getRegionInfoForReplica(regions.get(0), 1));
     map.put(s1, regions);
     map.put(s2, regionsOnS2);
     // add another server so that the cluster has some host on another rack
     map.put(ServerName.valueOf("host2", 1000, 11111), randomRegions(1));
-    assertFalse(loadBalancer.needsBalance(HConstants.ENSEMBLE_TABLE_NAME,
-      new BalancerClusterState(map, null, null, new ForTestRackManagerOne())));
+    BalancerClusterState cluster =
+      new BalancerClusterState(map, null, null, new ForTestRackManagerOne());
+    loadBalancer.initCosts(cluster);
+    assertTrue(loadBalancer.needsBalance(HConstants.ENSEMBLE_TABLE_NAME, cluster));
+  }
+
+  @Test
+  public void testNoNeededBalanceForColocatedReplicasTooFewRacks() {
+    // Three hosts, two racks, and three replicas for a region. This cannot be balanced
+    List<RegionInfo> regions = randomRegions(1);
+    ServerName s1 = ServerName.valueOf("host1", 1000, 11111);
+    ServerName s2 = ServerName.valueOf("host11", 1000, 11111);
+    ServerName s3 = ServerName.valueOf("host2", 1000, 11111);
+    Map<ServerName, List<RegionInfo>> map = new HashMap<>();
+    List<RegionInfo> regionsOnS2 = new ArrayList<>(1);
+    regionsOnS2.add(RegionReplicaUtil.getRegionInfoForReplica(regions.get(0), 1));
+    map.put(s1, regions);
+    map.put(s2, regionsOnS2);
+    // there are 3 replicas for region 0, but only add a second rack
+    map.put(s3, ImmutableList.of(RegionReplicaUtil.getRegionInfoForReplica(regions.get(0), 2)));
+    BalancerClusterState cluster =
+      new BalancerClusterState(map, null, null, new ForTestRackManagerOne());
+    loadBalancer.initCosts(cluster);
+    // Should be false because there aren't enough racks
+    assertFalse(loadBalancer.needsBalance(HConstants.ENSEMBLE_TABLE_NAME, cluster));
   }
 
   @Test
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/LoadOnlyFavoredStochasticBalancer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/LoadOnlyFavoredStochasticBalancer.java
index ba6ee4af843f..d658f7cfa167 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/LoadOnlyFavoredStochasticBalancer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/LoadOnlyFavoredStochasticBalancer.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hbase.master.balancer;
 
-import java.util.ArrayList;
-import java.util.List;
+import java.util.HashMap;
+import java.util.Map;
 
 /**
  * Used for FavoredNode unit tests
@@ -26,9 +26,10 @@ public class LoadOnlyFavoredStochasticBalancer extends FavoredStochasticBalancer {
 
   @Override
-  protected List<CandidateGenerator> createCandidateGenerators() {
-    List<CandidateGenerator> fnPickers = new ArrayList<>(1);
-    fnPickers.add(new FavoredNodeLoadPicker());
+  protected Map<Class<? extends CandidateGenerator>, CandidateGenerator>
+    createCandidateGenerators() {
+    Map<Class<? extends CandidateGenerator>, CandidateGenerator> fnPickers = new HashMap<>(1);
+    fnPickers.put(FavoredNodeLoadPicker.class, new FavoredNodeLoadPicker());
     return fnPickers;
   }
 }