diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java index 5f44fd1842fb..c9cc906f16c1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java @@ -167,12 +167,15 @@ public RegionInfo getDaughterTwoRI() { return daughterTwoRI; } + private boolean hasBestSplitRow() { + return bestSplitRow != null && bestSplitRow.length > 0; + } + /** * Check whether the region is splittable * @param env MasterProcedureEnv * @param regionToSplit parent Region to be split * @param splitRow if splitRow is not specified, will first try to get bestSplitRow from RS - * @throws IOException */ private void checkSplittable(final MasterProcedureEnv env, final RegionInfo regionToSplit, final byte[] splitRow) throws IOException { @@ -187,19 +190,20 @@ private void checkSplittable(final MasterProcedureEnv env, boolean splittable = false; if (node != null) { try { - if (bestSplitRow == null || bestSplitRow.length == 0) { - LOG - .info("splitKey isn't explicitly specified, will try to find a best split key from RS"); - } - // Always set bestSplitRow request as true here, - // need to call Region#checkSplit to check it splittable or not - GetRegionInfoResponse response = AssignmentManagerUtil.getRegionInfoResponse(env, - node.getRegionLocation(), node.getRegionInfo(), true); - if(bestSplitRow == null || bestSplitRow.length == 0) { - bestSplitRow = response.hasBestSplitRow() ? response.getBestSplitRow().toByteArray() : null; + GetRegionInfoResponse response; + if (!hasBestSplitRow()) { + LOG.info( + "{} splitKey isn't explicitly specified, will try to find a best split key from RS {}", + node.getRegionInfo().getRegionNameAsString(), node.getRegionLocation()); + response = AssignmentManagerUtil.getRegionInfoResponse(env, node.getRegionLocation(), + node.getRegionInfo(), true); + bestSplitRow = + response.hasBestSplitRow() ? 
response.getBestSplitRow().toByteArray() : null; + } else { + response = AssignmentManagerUtil.getRegionInfoResponse(env, node.getRegionLocation(), + node.getRegionInfo(), false); } splittable = response.hasSplittable() && response.getSplittable(); - if (LOG.isDebugEnabled()) { LOG.debug("Splittable=" + splittable + " " + node.toShortString()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java index 659983b6c828..7516c54e625b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java @@ -191,7 +191,7 @@ public synchronized boolean requestSplit(final Region r) { HRegion hr = (HRegion)r; try { if (shouldSplitRegion() && hr.getCompactPriority() >= PRIORITY_USER) { - byte[] midKey = hr.checkSplit(); + byte[] midKey = hr.checkSplit().orElse(null); if (midKey != null) { requestSplit(r, midKey); return true; @@ -216,9 +216,6 @@ public synchronized void requestSplit(final Region r, byte[] midKey, User user) if (midKey == null) { LOG.debug("Region " + r.getRegionInfo().getRegionNameAsString() + " not splittable because midkey=null"); - if (((HRegion)r).shouldForceSplit()) { - ((HRegion)r).clearSplit(); - } return; } try { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java index ebb0b2ab12f5..8ad81267519b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java @@ -68,7 +68,6 @@ protected void configureForRegion(HRegion region) { @Override protected boolean shouldSplit() { - boolean force = region.shouldForceSplit(); boolean foundABigStore = false; for (HStore store : region.getStores()) { @@ -84,7 +83,7 @@ protected boolean shouldSplit() { } } - return foundABigStore || force; + return foundABigStore; } long getDesiredMaxFileSize() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DisabledRegionSplitPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DisabledRegionSplitPolicy.java index 0cb784b4b298..e6dae254dc43 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DisabledRegionSplitPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DisabledRegionSplitPolicy.java @@ -22,15 +22,20 @@ import org.apache.hadoop.hbase.HBaseInterfaceAudience; /** - * A {@link RegionSplitPolicy} that disables region splits. - * This should be used with care, since it will disable automatic sharding. - * Most of the time, using {@link ConstantSizeRegionSplitPolicy} with a - * large region size (10GB, etc) is safer. + * A {@link RegionSplitPolicy} that disables region splits. This should be used with care, since it + * will disable automatic sharding. Most of the time, using {@link ConstantSizeRegionSplitPolicy} + * with a large region size (10GB, etc) is safer. 
*/ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class DisabledRegionSplitPolicy extends RegionSplitPolicy { + @Override protected boolean shouldSplit() { return false; } + + @Override + protected boolean canSplit() { + return false; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index dd5d6c8c512c..40a009c2c7c0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -696,8 +696,6 @@ void sawNoSuchFamily() { // Stop updates lock private final ReentrantReadWriteLock updatesLock = new ReentrantReadWriteLock(); - private boolean splitRequest; - private byte[] explicitSplitPoint = null; private final MultiVersionConcurrencyControl mvcc; @@ -1478,7 +1476,7 @@ public boolean isAvailable() { @Override public boolean isSplittable() { - return isAvailable() && !hasReferences(); + return splitPolicy.canSplit(); } @Override @@ -1977,7 +1975,8 @@ public void setMobFileCache(MobFileCache mobFileCache) { /** * @return split policy for this region. */ - public RegionSplitPolicy getSplitPolicy() { + @VisibleForTesting + RegionSplitPolicy getSplitPolicy() { return this.splitPolicy; } @@ -8408,9 +8407,9 @@ private static List sort(List cells, final CellComparator comparator public static final long FIXED_OVERHEAD = ClassSize.align( ClassSize.OBJECT + - ClassSize.ARRAY + - 55 * ClassSize.REFERENCE + 3 * Bytes.SIZEOF_INT + - (15 * Bytes.SIZEOF_LONG) + + 56 * ClassSize.REFERENCE + + 3 * Bytes.SIZEOF_INT + + 14 * Bytes.SIZEOF_LONG + 3 * Bytes.SIZEOF_BOOLEAN); // woefully out of date - currently missing: @@ -8537,50 +8536,26 @@ public void run(Message message) { return responseBuilder.build(); } - boolean shouldForceSplit() { - return this.splitRequest; - } - - byte[] getExplicitSplitPoint() { - return this.explicitSplitPoint; - } - - void forceSplit(byte[] sp) { - // This HRegion will go away after the forced split is successful - // But if a forced split fails, we need to clear forced split. - this.splitRequest = true; - if (sp != null) { - this.explicitSplitPoint = sp; - } - } - - void clearSplit() { - this.splitRequest = false; - this.explicitSplitPoint = null; + public Optional checkSplit() { + return checkSplit(false); } /** - * Return the splitpoint. null indicates the region isn't splittable - * If the splitpoint isn't explicitly specified, it will go over the stores - * to find the best splitpoint. Currently the criteria of best splitpoint - * is based on the size of the store. + * Return the split point. An empty result indicates the region isn't splittable. */ - public byte[] checkSplit() { + public Optional checkSplit(boolean force) { // Can't split META if (this.getRegionInfo().isMetaRegion()) { - if (shouldForceSplit()) { - LOG.warn("Cannot split meta region in HBase 0.20 and above"); - } - return null; + return Optional.empty(); } // Can't split a region that is closing. 
if (this.isClosing()) { - return null; + return Optional.empty(); } - if (!splitPolicy.shouldSplit()) { - return null; + if (!force && !splitPolicy.shouldSplit()) { + return Optional.empty(); } byte[] ret = splitPolicy.getSplitPoint(); @@ -8590,10 +8565,12 @@ public byte[] checkSplit() { checkRow(ret, "calculated split"); } catch (IOException e) { LOG.error("Ignoring invalid split for region {}", this, e); - return null; + return Optional.empty(); } + return Optional.of(ret); + } else { + return Optional.empty(); } - return ret; } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java index 21446d2cf50b..c40d6aafd622 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java @@ -70,7 +70,6 @@ protected void configureForRegion(HRegion region) { @Override protected boolean shouldSplit() { - boolean force = region.shouldForceSplit(); boolean foundABigStore = false; // Get count of regions that have the same common table as this.region int tableRegionsCount = getCountOfCommonTableRegions(); @@ -95,7 +94,7 @@ protected boolean shouldSplit() { } } - return foundABigStore || force; + return foundABigStore; } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java index c133a7a6429d..a2ea802a7dfd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java @@ -620,7 +620,7 @@ private boolean flushRegion(HRegion region, boolean emergencyFlush, FlushResult flushResult = region.flushcache(families, false, tracker); boolean shouldCompact = flushResult.isCompactionNeeded(); // We just want to check the size - boolean shouldSplit = region.checkSplit() != null; + boolean shouldSplit = region.checkSplit().isPresent(); if (shouldSplit) { this.server.compactSplitThread.requestSplit(region); } else if (shouldCompact) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index 44f4d026d083..c8ec303fe57e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -1837,35 +1837,30 @@ public GetOnlineRegionResponse getOnlineRegion(final RpcController controller, @Override @QosPriority(priority=HConstants.ADMIN_QOS) public GetRegionInfoResponse getRegionInfo(final RpcController controller, - final GetRegionInfoRequest request) throws ServiceException { + final GetRegionInfoRequest request) throws ServiceException { try { checkOpen(); requestCount.increment(); HRegion region = getRegion(request.getRegion()); RegionInfo info = region.getRegionInfo(); - byte[] bestSplitRow = null; - boolean shouldSplit = true; + byte[] bestSplitRow; if (request.hasBestSplitRow() && request.getBestSplitRow()) { - HRegion r = region; - region.startRegionOperation(Operation.SPLIT_REGION); - r.forceSplit(null); - // Even after setting force split if split policy says no to 
split then we should not split. - shouldSplit = region.getSplitPolicy().shouldSplit() && !info.isMetaRegion(); - bestSplitRow = r.checkSplit(); + bestSplitRow = region.checkSplit(true).orElse(null); // when all table data are in memstore, bestSplitRow = null // try to flush region first - if(bestSplitRow == null) { - r.flush(true); - bestSplitRow = r.checkSplit(); + if (bestSplitRow == null) { + region.flush(true); + bestSplitRow = region.checkSplit(true).orElse(null); } - r.clearSplit(); + } else { + bestSplitRow = null; } GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder(); builder.setRegionInfo(ProtobufUtil.toRegionInfo(info)); if (request.hasCompactionState() && request.getCompactionState()) { builder.setCompactionState(ProtobufUtil.createCompactionState(region.getCompactionState())); } - builder.setSplittable(region.isSplittable() && shouldSplit); + builder.setSplittable(region.isSplittable()); builder.setMergeable(region.isMergeable()); if (request.hasBestSplitRow() && request.getBestSplitRow() && bestSplitRow != null) { builder.setBestSplitRow(UnsafeByteOperations.unsafeWrap(bestSplitRow)); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java index e0ec62bd64e2..a439e407bf8f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java @@ -70,16 +70,20 @@ protected void configureForRegion(HRegion region) { */ protected abstract boolean shouldSplit(); + /** + * @return {@code true} if the specified region can be split. + */ + protected boolean canSplit() { + return !region.getRegionInfo().isMetaRegion() && region.isAvailable() && + !region.hasReferences(); + } + /** * @return the key at which the region should be split, or null * if it cannot be split. This will only be called if shouldSplit * previously returned true. 
*/ protected byte[] getSplitPoint() { - byte[] explicitSplitPoint = this.region.getExplicitSplitPoint(); - if (explicitSplitPoint != null) { - return explicitSplitPoint; - } List stores = region.getStores(); byte[] splitPointFromLargestStore = null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index 958809b0051b..e73dce56eb08 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -3139,9 +3139,8 @@ public void unassignRegionByRow(byte[] row, RegionLocator table) throws IOExcept unassignRegion(hrl.getRegion().getRegionName()); } - /* + /** * Retrieves a splittable region randomly from tableName - * * @param tableName name of table * @param maxAttempts maximum number of attempts, unlimited for value of -1 * @return the HRegion chosen, null if none was found within limit of maxAttempts @@ -3164,15 +3163,14 @@ public HRegion getSplittableRegion(TableName tableName, int maxAttempts) { if (regCount > 0) { idx = random.nextInt(regCount); // if we have just tried this region, there is no need to try again - if (attempted.contains(idx)) + if (attempted.contains(idx)) { continue; - try { - regions.get(idx).checkSplit(); - return regions.get(idx); - } catch (Exception ex) { - LOG.warn("Caught exception", ex); - attempted.add(idx); } + HRegion region = regions.get(idx); + if (region.checkSplit().isPresent()) { + return region; + } + attempted.add(idx); } attempts++; } while (maxAttempts == -1 || attempts < maxAttempts); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java index c35703805afa..c01edaa0467e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java @@ -21,6 +21,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.ArrayList; import java.util.List; @@ -70,6 +71,7 @@ import org.junit.rules.TestName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.Iterators; import org.apache.hbase.thirdparty.com.google.common.collect.Maps; import org.apache.hbase.thirdparty.com.google.common.io.Closeables; @@ -121,7 +123,6 @@ public void testCanSplitJustAfterASplit() throws Exception { admin.createTable(htd); TEST_UTIL.loadTable(source, fam); compactSplit.setCompactionsEnabled(false); - TEST_UTIL.getHBaseCluster().getRegions(tableName).get(0).forceSplit(null); admin.split(tableName); TEST_UTIL.waitFor(60000, () -> TEST_UTIL.getHBaseCluster().getRegions(tableName).size() == 2); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java index 4676aa0b29b0..72e0e8ad4ef0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java @@ -873,9 +873,6 @@ public void testMultipleTimestamps() throws IOException { public void 
testSplitWithEmptyColFam() throws IOException { init(this.name.getMethodName()); assertFalse(store.getSplitPoint().isPresent()); - store.getHRegion().forceSplit(null); - assertFalse(store.getSplitPoint().isPresent()); - store.getHRegion().clearSplit(); } @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java index 09c141f7129b..5ca693c5a939 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java @@ -21,6 +21,9 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; import java.io.IOException; import java.util.ArrayList; @@ -30,76 +33,64 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionInfoBuilder; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.Before; import org.junit.ClassRule; -import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; -import org.mockito.Mockito; -@Category({RegionServerTests.class, SmallTests.class}) +@Category({ RegionServerTests.class, SmallTests.class }) public class TestRegionSplitPolicy { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionSplitPolicy.class); + HBaseClassTestRule.forClass(TestRegionSplitPolicy.class); private Configuration conf; - private HTableDescriptor htd; private HRegion mockRegion; private List stores; private static final TableName TABLENAME = TableName.valueOf("t"); - @Rule - public TestName name = new TestName(); - @Before public void setupMocks() { conf = HBaseConfiguration.create(); RegionInfo hri = RegionInfoBuilder.newBuilder(TABLENAME).build(); - htd = new HTableDescriptor(TABLENAME); - mockRegion = Mockito.mock(HRegion.class); - Mockito.doReturn(htd).when(mockRegion).getTableDescriptor(); - Mockito.doReturn(hri).when(mockRegion).getRegionInfo(); + mockRegion = mock(HRegion.class); + doReturn(hri).when(mockRegion).getRegionInfo(); stores = new ArrayList<>(); - Mockito.doReturn(stores).when(mockRegion).getStores(); + doReturn(stores).when(mockRegion).getStores(); } @Test public void testForceSplitRegionWithReference() throws IOException { - htd.setMaxFileSize(1024L); + TableDescriptor td = TableDescriptorBuilder.newBuilder(TABLENAME).setMaxFileSize(1024L).build(); + doReturn(td).when(mockRegion).getTableDescriptor(); // Add a store above the requisite size. Should split. 
- HStore mockStore = Mockito.mock(HStore.class); - Mockito.doReturn(2000L).when(mockStore).getSize(); + HStore mockStore = mock(HStore.class); + doReturn(2000L).when(mockStore).getSize(); // Act as if there's a reference file or some other reason it can't split. // This should prevent splitting even though it's big enough. - Mockito.doReturn(false).when(mockStore).canSplit(); + doReturn(false).when(mockStore).canSplit(); stores.add(mockStore); conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, ConstantSizeRegionSplitPolicy.class.getName()); ConstantSizeRegionSplitPolicy policy = - (ConstantSizeRegionSplitPolicy)RegionSplitPolicy.create(mockRegion, conf); - assertFalse(policy.shouldSplit()); - Mockito.doReturn(true).when(mockRegion).shouldForceSplit(); + (ConstantSizeRegionSplitPolicy) RegionSplitPolicy.create(mockRegion, conf); assertFalse(policy.shouldSplit()); - Mockito.doReturn(false).when(mockRegion).shouldForceSplit(); conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, IncreasingToUpperBoundRegionSplitPolicy.class.getName()); policy = (IncreasingToUpperBoundRegionSplitPolicy) RegionSplitPolicy.create(mockRegion, conf); assertFalse(policy.shouldSplit()); - Mockito.doReturn(true).when(mockRegion).shouldForceSplit(); - assertFalse(policy.shouldSplit()); } @Test @@ -109,31 +100,32 @@ public void testIncreasingToUpperBoundRegionSplitPolicy() throws IOException { IncreasingToUpperBoundRegionSplitPolicy.class.getName()); // Now make it so the mock region has a RegionServerService that will // return 'online regions'. - RegionServerServices rss = Mockito.mock(RegionServerServices.class); + RegionServerServices rss = mock(RegionServerServices.class); final List regions = new ArrayList<>(); - Mockito.doReturn(regions).when(rss).getRegions(TABLENAME); - Mockito.when(mockRegion.getRegionServerServices()).thenReturn(rss); + doReturn(regions).when(rss).getRegions(TABLENAME); + when(mockRegion.getRegionServerServices()).thenReturn(rss); // Set max size for this 'table'. long maxSplitSize = 1024L; - htd.setMaxFileSize(maxSplitSize); - // Set flush size to 1/8. IncreasingToUpperBoundRegionSplitPolicy + // Set flush size to 1/8. IncreasingToUpperBoundRegionSplitPolicy // grows by the cube of the number of regions times flushsize each time. - long flushSize = maxSplitSize/8; + long flushSize = maxSplitSize / 8; conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, flushSize); - htd.setMemStoreFlushSize(flushSize); + TableDescriptor td = TableDescriptorBuilder.newBuilder(TABLENAME).setMaxFileSize(maxSplitSize) + .setMemStoreFlushSize(flushSize).build(); + doReturn(td).when(mockRegion).getTableDescriptor(); // If RegionServerService with no regions in it -- 'online regions' == 0 -- // then IncreasingToUpperBoundRegionSplitPolicy should act like a // ConstantSizePolicy IncreasingToUpperBoundRegionSplitPolicy policy = - (IncreasingToUpperBoundRegionSplitPolicy)RegionSplitPolicy.create(mockRegion, conf); + (IncreasingToUpperBoundRegionSplitPolicy) RegionSplitPolicy.create(mockRegion, conf); doConstantSizePolicyTests(policy); - // Add a store in excess of split size. Because there are "no regions" + // Add a store in excess of split size. 
Because there are "no regions" // on this server -- rss.getOnlineRegions is 0 -- then we should split // like a constantsizeregionsplitpolicy would - HStore mockStore = Mockito.mock(HStore.class); - Mockito.doReturn(2000L).when(mockStore).getSize(); - Mockito.doReturn(true).when(mockStore).canSplit(); + HStore mockStore = mock(HStore.class); + doReturn(2000L).when(mockStore).getSize(); + doReturn(true).when(mockStore).canSplit(); stores.add(mockStore); // It should split assertTrue(policy.shouldSplit()); @@ -141,18 +133,18 @@ public void testIncreasingToUpperBoundRegionSplitPolicy() throws IOException { // Now test that we increase our split size as online regions for a table // grows. With one region, split size should be flushsize. regions.add(mockRegion); - Mockito.doReturn(flushSize).when(mockStore).getSize(); + doReturn(flushSize).when(mockStore).getSize(); // Should not split since store is flush size. assertFalse(policy.shouldSplit()); // Set size of store to be > 2*flush size and we should split - Mockito.doReturn(flushSize*2 + 1).when(mockStore).getSize(); + doReturn(flushSize * 2 + 1).when(mockStore).getSize(); assertTrue(policy.shouldSplit()); // Add another region to the 'online regions' on this server and we should // now be no longer be splittable since split size has gone up. regions.add(mockRegion); assertFalse(policy.shouldSplit()); // make sure its just over; verify it'll split - Mockito.doReturn((long)(maxSplitSize * 1.25 + 1)).when(mockStore).getSize(); + doReturn((long) (maxSplitSize * 1.25 + 1)).when(mockStore).getSize(); assertTrue(policy.shouldSplit()); // Finally assert that even if loads of regions, we'll split at max size @@ -163,41 +155,39 @@ public void testIncreasingToUpperBoundRegionSplitPolicy() throws IOException { @Test public void testBusyRegionSplitPolicy() throws Exception { - conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, - BusyRegionSplitPolicy.class.getName()); + doReturn(TableDescriptorBuilder.newBuilder(TABLENAME).build()).when(mockRegion) + .getTableDescriptor(); + conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, BusyRegionSplitPolicy.class.getName()); conf.setLong("hbase.busy.policy.minAge", 1000000L); conf.setFloat("hbase.busy.policy.blockedRequests", 0.1f); - RegionServerServices rss = Mockito.mock(RegionServerServices.class); + RegionServerServices rss = mock(RegionServerServices.class); final List regions = new ArrayList<>(); - Mockito.doReturn(regions).when(rss).getRegions(TABLENAME); - Mockito.when(mockRegion.getRegionServerServices()).thenReturn(rss); - Mockito.when(mockRegion.getBlockedRequestsCount()).thenReturn(0L); - Mockito.when(mockRegion.getWriteRequestsCount()).thenReturn(0L); - + doReturn(regions).when(rss).getRegions(TABLENAME); + when(mockRegion.getRegionServerServices()).thenReturn(rss); + when(mockRegion.getBlockedRequestsCount()).thenReturn(0L); + when(mockRegion.getWriteRequestsCount()).thenReturn(0L); BusyRegionSplitPolicy policy = - (BusyRegionSplitPolicy)RegionSplitPolicy.create(mockRegion, conf); + (BusyRegionSplitPolicy) RegionSplitPolicy.create(mockRegion, conf); - Mockito.when(mockRegion.getBlockedRequestsCount()).thenReturn(10L); - Mockito.when(mockRegion.getWriteRequestsCount()).thenReturn(10L); + when(mockRegion.getBlockedRequestsCount()).thenReturn(10L); + when(mockRegion.getWriteRequestsCount()).thenReturn(10L); // Not enough time since region came online assertFalse(policy.shouldSplit()); - // Reset min age for split to zero conf.setLong("hbase.busy.policy.minAge", 0L); // Aggregate over 500 ms periods 
conf.setLong("hbase.busy.policy.aggWindow", 500L); - policy = - (BusyRegionSplitPolicy)RegionSplitPolicy.create(mockRegion, conf); + policy = (BusyRegionSplitPolicy) RegionSplitPolicy.create(mockRegion, conf); long start = EnvironmentEdgeManager.currentTime(); - Mockito.when(mockRegion.getBlockedRequestsCount()).thenReturn(10L); - Mockito.when(mockRegion.getWriteRequestsCount()).thenReturn(20L); + when(mockRegion.getBlockedRequestsCount()).thenReturn(10L); + when(mockRegion.getWriteRequestsCount()).thenReturn(20L); Thread.sleep(300); assertFalse(policy.shouldSplit()); - Mockito.when(mockRegion.getBlockedRequestsCount()).thenReturn(12L); - Mockito.when(mockRegion.getWriteRequestsCount()).thenReturn(30L); + when(mockRegion.getBlockedRequestsCount()).thenReturn(12L); + when(mockRegion.getWriteRequestsCount()).thenReturn(30L); Thread.sleep(2); // Enough blocked requests since last time, but aggregate blocked request // rate over last 500 ms is still low, because major portion of the window is constituted @@ -205,34 +195,33 @@ public void testBusyRegionSplitPolicy() throws Exception { if (EnvironmentEdgeManager.currentTime() - start < 500) { assertFalse(policy.shouldSplit()); } - Mockito.when(mockRegion.getBlockedRequestsCount()).thenReturn(14L); - Mockito.when(mockRegion.getWriteRequestsCount()).thenReturn(40L); + when(mockRegion.getBlockedRequestsCount()).thenReturn(14L); + when(mockRegion.getWriteRequestsCount()).thenReturn(40L); Thread.sleep(200); assertTrue(policy.shouldSplit()); } private void assertWithinJitter(long maxSplitSize, long sizeToCheck) { assertTrue("Size greater than lower bound of jitter", - (long)(maxSplitSize * 0.75) <= sizeToCheck); - assertTrue("Size less than upper bound of jitter", - (long)(maxSplitSize * 1.25) >= sizeToCheck); + (long) (maxSplitSize * 0.75) <= sizeToCheck); + assertTrue("Size less than upper bound of jitter", (long) (maxSplitSize * 1.25) >= sizeToCheck); } @Test public void testCreateDefault() throws IOException { conf.setLong(HConstants.HREGION_MAX_FILESIZE, 1234L); - + TableDescriptor td = TableDescriptorBuilder.newBuilder(TABLENAME).build(); + doReturn(td).when(mockRegion).getTableDescriptor(); // Using a default HTD, should pick up the file size from // configuration. 
ConstantSizeRegionSplitPolicy policy = - (ConstantSizeRegionSplitPolicy)RegionSplitPolicy.create( - mockRegion, conf); + (ConstantSizeRegionSplitPolicy) RegionSplitPolicy.create(mockRegion, conf); assertWithinJitter(1234L, policy.getDesiredMaxFileSize()); // If specified in HTD, should use that - htd.setMaxFileSize(9999L); - policy = (ConstantSizeRegionSplitPolicy)RegionSplitPolicy.create( - mockRegion, conf); + td = TableDescriptorBuilder.newBuilder(TABLENAME).setMaxFileSize(9999L).build(); + doReturn(td).when(mockRegion).getTableDescriptor(); + policy = (ConstantSizeRegionSplitPolicy) RegionSplitPolicy.create(mockRegion, conf); assertWithinJitter(9999L, policy.getDesiredMaxFileSize()); } @@ -241,75 +230,58 @@ public void testCreateDefault() throws IOException { */ @Test public void testCustomPolicy() throws IOException { - HTableDescriptor myHtd = new HTableDescriptor(TableName.valueOf(name.getMethodName())); - myHtd.setValue(HTableDescriptor.SPLIT_POLICY, - KeyPrefixRegionSplitPolicy.class.getName()); - myHtd.setValue(KeyPrefixRegionSplitPolicy.PREFIX_LENGTH_KEY, String.valueOf(2)); - - HRegion myMockRegion = Mockito.mock(HRegion.class); - Mockito.doReturn(myHtd).when(myMockRegion).getTableDescriptor(); - Mockito.doReturn(stores).when(myMockRegion).getStores(); - - HStore mockStore = Mockito.mock(HStore.class); - Mockito.doReturn(2000L).when(mockStore).getSize(); - Mockito.doReturn(true).when(mockStore).canSplit(); - Mockito.doReturn(Optional.of(Bytes.toBytes("abcd"))).when(mockStore).getSplitPoint(); - stores.add(mockStore); + TableDescriptor td = TableDescriptorBuilder.newBuilder(TABLENAME) + .setRegionSplitPolicyClassName(KeyPrefixRegionSplitPolicy.class.getName()) + .setValue(KeyPrefixRegionSplitPolicy.PREFIX_LENGTH_KEY, "2").build(); - KeyPrefixRegionSplitPolicy policy = (KeyPrefixRegionSplitPolicy) RegionSplitPolicy - .create(myMockRegion, conf); + doReturn(td).when(mockRegion).getTableDescriptor(); - assertEquals("ab", Bytes.toString(policy.getSplitPoint())); - - Mockito.doReturn(true).when(myMockRegion).shouldForceSplit(); - Mockito.doReturn(Bytes.toBytes("efgh")).when(myMockRegion) - .getExplicitSplitPoint(); + HStore mockStore = mock(HStore.class); + doReturn(2000L).when(mockStore).getSize(); + doReturn(true).when(mockStore).canSplit(); + doReturn(Optional.of(Bytes.toBytes("abcd"))).when(mockStore).getSplitPoint(); + stores.add(mockStore); - policy = (KeyPrefixRegionSplitPolicy) RegionSplitPolicy - .create(myMockRegion, conf); + KeyPrefixRegionSplitPolicy policy = + (KeyPrefixRegionSplitPolicy) RegionSplitPolicy.create(mockRegion, conf); - assertEquals("ef", Bytes.toString(policy.getSplitPoint())); + assertEquals("ab", Bytes.toString(policy.getSplitPoint())); } @Test public void testConstantSizePolicy() throws IOException { - htd.setMaxFileSize(1024L); + TableDescriptor td = TableDescriptorBuilder.newBuilder(TABLENAME).setMaxFileSize(1024L).build(); + doReturn(td).when(mockRegion).getTableDescriptor(); ConstantSizeRegionSplitPolicy policy = - (ConstantSizeRegionSplitPolicy)RegionSplitPolicy.create(mockRegion, conf); + (ConstantSizeRegionSplitPolicy) RegionSplitPolicy.create(mockRegion, conf); doConstantSizePolicyTests(policy); } /** * Run through tests for a ConstantSizeRegionSplitPolicy - * @param policy */ private void doConstantSizePolicyTests(final ConstantSizeRegionSplitPolicy policy) { // For no stores, should not split assertFalse(policy.shouldSplit()); // Add a store above the requisite size. Should split. 
- HStore mockStore = Mockito.mock(HStore.class); - Mockito.doReturn(2000L).when(mockStore).getSize(); - Mockito.doReturn(true).when(mockStore).canSplit(); + HStore mockStore = mock(HStore.class); + doReturn(2000L).when(mockStore).getSize(); + doReturn(true).when(mockStore).canSplit(); stores.add(mockStore); assertTrue(policy.shouldSplit()); // Act as if there's a reference file or some other reason it can't split. // This should prevent splitting even though it's big enough. - Mockito.doReturn(false).when(mockStore).canSplit(); + doReturn(false).when(mockStore).canSplit(); assertFalse(policy.shouldSplit()); // Reset splittability after above - Mockito.doReturn(true).when(mockStore).canSplit(); - - // Set to a small size but turn on forceSplit. Should result in a split. - Mockito.doReturn(true).when(mockRegion).shouldForceSplit(); - Mockito.doReturn(100L).when(mockStore).getSize(); - assertTrue(policy.shouldSplit()); + doReturn(true).when(mockStore).canSplit(); - // Turn off forceSplit, should not split - Mockito.doReturn(false).when(mockRegion).shouldForceSplit(); + // Set to a small size, should not split + doReturn(100L).when(mockStore).getSize(); assertFalse(policy.shouldSplit()); // Clear families we added above @@ -318,67 +290,56 @@ private void doConstantSizePolicyTests(final ConstantSizeRegionSplitPolicy polic @Test public void testGetSplitPoint() throws IOException { + TableDescriptor td = TableDescriptorBuilder.newBuilder(TABLENAME).build(); + doReturn(td).when(mockRegion).getTableDescriptor(); + ConstantSizeRegionSplitPolicy policy = - (ConstantSizeRegionSplitPolicy)RegionSplitPolicy.create(mockRegion, conf); + (ConstantSizeRegionSplitPolicy) RegionSplitPolicy.create(mockRegion, conf); // For no stores, should not split assertFalse(policy.shouldSplit()); assertNull(policy.getSplitPoint()); // Add a store above the requisite size. Should split. - HStore mockStore = Mockito.mock(HStore.class); - Mockito.doReturn(2000L).when(mockStore).getSize(); - Mockito.doReturn(true).when(mockStore).canSplit(); - Mockito.doReturn(Optional.of(Bytes.toBytes("store 1 split"))).when(mockStore).getSplitPoint(); + HStore mockStore = mock(HStore.class); + doReturn(2000L).when(mockStore).getSize(); + doReturn(true).when(mockStore).canSplit(); + doReturn(Optional.of(Bytes.toBytes("store 1 split"))).when(mockStore).getSplitPoint(); stores.add(mockStore); - assertEquals("store 1 split", - Bytes.toString(policy.getSplitPoint())); + assertEquals("store 1 split", Bytes.toString(policy.getSplitPoint())); // Add a bigger store. 
The split point should come from that one - HStore mockStore2 = Mockito.mock(HStore.class); - Mockito.doReturn(4000L).when(mockStore2).getSize(); - Mockito.doReturn(true).when(mockStore2).canSplit(); - Mockito.doReturn(Optional.of(Bytes.toBytes("store 2 split"))).when(mockStore2).getSplitPoint(); + HStore mockStore2 = mock(HStore.class); + doReturn(4000L).when(mockStore2).getSize(); + doReturn(true).when(mockStore2).canSplit(); + doReturn(Optional.of(Bytes.toBytes("store 2 split"))).when(mockStore2).getSplitPoint(); stores.add(mockStore2); - assertEquals("store 2 split", - Bytes.toString(policy.getSplitPoint())); + assertEquals("store 2 split", Bytes.toString(policy.getSplitPoint())); } @Test public void testDelimitedKeyPrefixRegionSplitPolicy() throws IOException { - HTableDescriptor myHtd = new HTableDescriptor(TableName.valueOf(name.getMethodName())); - myHtd.setValue(HTableDescriptor.SPLIT_POLICY, - DelimitedKeyPrefixRegionSplitPolicy.class.getName()); - myHtd.setValue(DelimitedKeyPrefixRegionSplitPolicy.DELIMITER_KEY, ","); - - HRegion myMockRegion = Mockito.mock(HRegion.class); - Mockito.doReturn(myHtd).when(myMockRegion).getTableDescriptor(); - Mockito.doReturn(stores).when(myMockRegion).getStores(); - - HStore mockStore = Mockito.mock(HStore.class); - Mockito.doReturn(2000L).when(mockStore).getSize(); - Mockito.doReturn(true).when(mockStore).canSplit(); - Mockito.doReturn(Optional.of(Bytes.toBytes("ab,cd"))).when(mockStore).getSplitPoint(); - stores.add(mockStore); + TableDescriptor td = TableDescriptorBuilder.newBuilder(TABLENAME) + .setRegionSplitPolicyClassName(DelimitedKeyPrefixRegionSplitPolicy.class.getName()) + .setValue(DelimitedKeyPrefixRegionSplitPolicy.DELIMITER_KEY, ",").build(); - DelimitedKeyPrefixRegionSplitPolicy policy = (DelimitedKeyPrefixRegionSplitPolicy) RegionSplitPolicy - .create(myMockRegion, conf); + doReturn(td).when(mockRegion).getTableDescriptor(); + doReturn(stores).when(mockRegion).getStores(); - assertEquals("ab", Bytes.toString(policy.getSplitPoint())); - - Mockito.doReturn(true).when(myMockRegion).shouldForceSplit(); - Mockito.doReturn(Bytes.toBytes("efg,h")).when(myMockRegion) - .getExplicitSplitPoint(); + HStore mockStore = mock(HStore.class); + doReturn(2000L).when(mockStore).getSize(); + doReturn(true).when(mockStore).canSplit(); + doReturn(Optional.of(Bytes.toBytes("ab,cd"))).when(mockStore).getSplitPoint(); + stores.add(mockStore); - policy = (DelimitedKeyPrefixRegionSplitPolicy) RegionSplitPolicy - .create(myMockRegion, conf); + DelimitedKeyPrefixRegionSplitPolicy policy = + (DelimitedKeyPrefixRegionSplitPolicy) RegionSplitPolicy.create(mockRegion, conf); - assertEquals("efg", Bytes.toString(policy.getSplitPoint())); + assertEquals("ab", Bytes.toString(policy.getSplitPoint())); - Mockito.doReturn(Bytes.toBytes("ijk")).when(myMockRegion) - .getExplicitSplitPoint(); + doReturn(Optional.of(Bytes.toBytes("ijk"))).when(mockStore).getSplitPoint(); assertEquals("ijk", Bytes.toString(policy.getSplitPoint())); } @@ -386,7 +347,9 @@ public void testDelimitedKeyPrefixRegionSplitPolicy() throws IOException { public void testConstantSizePolicyWithJitter() throws IOException { conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, ConstantSizeRegionSplitPolicy.class.getName()); - htd.setMaxFileSize(Long.MAX_VALUE); + TableDescriptor td = + TableDescriptorBuilder.newBuilder(TABLENAME).setMaxFileSize(Long.MAX_VALUE).build(); + doReturn(td).when(mockRegion).getTableDescriptor(); boolean positiveJitter = false; ConstantSizeRegionSplitPolicy policy = null; while 
(!positiveJitter) { @@ -394,12 +357,11 @@ public void testConstantSizePolicyWithJitter() throws IOException { positiveJitter = policy.positiveJitterRate(); } // add a store - HStore mockStore = Mockito.mock(HStore.class); - Mockito.doReturn(2000L).when(mockStore).getSize(); - Mockito.doReturn(true).when(mockStore).canSplit(); + HStore mockStore = mock(HStore.class); + doReturn(2000L).when(mockStore).getSize(); + doReturn(true).when(mockStore).canSplit(); stores.add(mockStore); // Jitter shouldn't cause overflow when HTableDescriptor.MAX_FILESIZE set to Long.MAX_VALUE assertFalse(policy.shouldSplit()); } - }
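
The central API change in this patch is that `HRegion.checkSplit()` now returns `java.util.Optional<byte[]>` (with an overload `checkSplit(boolean force)`) instead of a nullable `byte[]`, and the per-region `splitRequest`/`explicitSplitPoint` state is removed. A minimal caller-side sketch of consuming the new return type, mirroring the `CompactSplit` and `MemStoreFlusher` call sites in the patch — the class and method names here are made up; only the `HRegion` calls come from the change:

```java
import java.util.Optional;
import org.apache.hadoop.hbase.regionserver.HRegion;

// Illustrative only: class and method names are invented; the HRegion calls
// are the ones introduced by this patch.
final class CheckSplitUsage {

  /** Policy-driven path, as CompactSplit#requestSplit and MemStoreFlusher now use it. */
  static boolean shouldRequestSplit(HRegion region) {
    // checkSplit() consults the region's RegionSplitPolicy and returns the split
    // point; an empty Optional replaces the old "return null" convention.
    Optional<byte[]> splitPoint = region.checkSplit();
    return splitPoint.isPresent();
  }

  /** Callers that still want a nullable byte[] can unwrap explicitly. */
  static byte[] splitPointOrNull(HRegion region) {
    return region.checkSplit().orElse(null);
  }
}
```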
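Forced splits no longer toggle `forceSplit()`/`clearSplit()` on the region; instead `RSRpcServices#getRegionInfo` with `bestSplitRow=true` calls `checkSplit(true)` directly, flushing once if all data is still in the memstore, and `checkSplit(true)` skips the policy's `shouldSplit()` check while still refusing meta regions, closing regions, and invalid split rows. A condensed sketch of that probe, with an invented wrapper class and method name:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.regionserver.HRegion;

// Condensed from the RSRpcServices#getRegionInfo hunk above; the wrapper class
// and method name are invented for illustration.
final class ForceSplitProbe {

  static byte[] probeBestSplitRow(HRegion region) throws IOException {
    byte[] bestSplitRow = region.checkSplit(true).orElse(null);
    if (bestSplitRow == null) {
      // All table data may still be sitting in the memstore: flush once and
      // re-check, as the RPC handler now does.
      region.flush(true);
      bestSplitRow = region.checkSplit(true).orElse(null);
    }
    return bestSplitRow;
  }
}
```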
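Split policies likewise lose the `region.shouldForceSplit()` escape hatch and gain a `canSplit()` hook that `HRegion.isSplittable()` now delegates to (with `DisabledRegionSplitPolicy` overriding it to return false). A hedged sketch of a custom policy under the new contract — the class name, the extra system-table restriction, and the 10 GB threshold are invented; `HStore.getSize()` and `HStore.canSplit()` are used the same way the built-in policies above use them:

```java
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;

// Hypothetical policy, only to illustrate the canSplit()/shouldSplit() contract
// after this patch.
public class ExampleTotalSizeSplitPolicy extends RegionSplitPolicy {

  private static final long TOTAL_SIZE_THRESHOLD = 10L * 1024 * 1024 * 1024; // invented

  @Override
  protected boolean canSplit() {
    // The new default refuses meta regions, unavailable regions, and regions
    // holding reference files; this example additionally (hypothetically)
    // refuses system tables.
    return super.canSplit() && !region.getRegionInfo().getTable().isSystemTable();
  }

  @Override
  protected boolean shouldSplit() {
    // No region.shouldForceSplit() term any more: forced splits go through
    // HRegion.checkSplit(true) and bypass this method entirely.
    long totalSize = 0;
    for (HStore store : region.getStores()) {
      if (!store.canSplit()) {
        return false; // a store with reference files blocks the split
      }
      totalSize += store.getSize();
    }
    return totalSize > TOTAL_SIZE_THRESHOLD;
  }
}
```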