diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FixedIntervalRateLimiter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FixedIntervalRateLimiter.java
index c5b2fc7f5d83..a71b5d4b2fba 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FixedIntervalRateLimiter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FixedIntervalRateLimiter.java
@@ -20,8 +20,8 @@
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
-
-import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * With this limiter resources will be refilled only after a fixed interval of time.
@@ -43,6 +43,8 @@ public class FixedIntervalRateLimiter extends RateLimiter {
   public static final String RATE_LIMITER_REFILL_INTERVAL_MS =
     "hbase.quota.rate.limiter.refill.interval.ms";
 
+  private static final Logger LOG = LoggerFactory.getLogger(FixedIntervalRateLimiter.class);
+
   private long nextRefillTime = -1L;
   private final long refillInterval;
 
@@ -52,10 +54,14 @@ public FixedIntervalRateLimiter() {
 
   public FixedIntervalRateLimiter(long refillInterval) {
     super();
-    Preconditions.checkArgument(getTimeUnitInMillis() >= refillInterval,
-      String.format("Refill interval %s must be less than or equal to TimeUnit millis %s",
-        refillInterval, getTimeUnitInMillis()));
-    this.refillInterval = refillInterval;
+    long timeUnit = getTimeUnitInMillis();
+    if (refillInterval > timeUnit) {
+      LOG.warn(
+        "Refill interval {} is larger than time unit {}. This is invalid. "
+          + "Instead, we will use the time unit {} as the refill interval",
+        refillInterval, timeUnit, timeUnit);
+    }
+    this.refillInterval = Math.min(timeUnit, refillInterval);
   }
 
   @Override
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java
index e912fe8ee153..b0e76663455b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java
@@ -130,20 +130,23 @@ private void ensureInitialized() {
   }
 
   private Map<String, UserQuotaState> fetchUserQuotaStateEntries() throws IOException {
-    return QuotaUtil.fetchUserQuotas(rsServices.getConnection(), tableMachineQuotaFactors,
-      machineQuotaFactor);
+    return QuotaUtil.fetchUserQuotas(rsServices.getConfiguration(), rsServices.getConnection(),
+      tableMachineQuotaFactors, machineQuotaFactor);
   }
 
   private Map<String, QuotaState> fetchRegionServerQuotaStateEntries() throws IOException {
-    return QuotaUtil.fetchRegionServerQuotas(rsServices.getConnection());
+    return QuotaUtil.fetchRegionServerQuotas(rsServices.getConfiguration(),
+      rsServices.getConnection());
   }
 
   private Map<TableName, QuotaState> fetchTableQuotaStateEntries() throws IOException {
-    return QuotaUtil.fetchTableQuotas(rsServices.getConnection(), tableMachineQuotaFactors);
+    return QuotaUtil.fetchTableQuotas(rsServices.getConfiguration(), rsServices.getConnection(),
+      tableMachineQuotaFactors);
   }
 
   private Map<String, QuotaState> fetchNamespaceQuotaStateEntries() throws IOException {
-    return QuotaUtil.fetchNamespaceQuotas(rsServices.getConnection(), machineQuotaFactor);
+    return QuotaUtil.fetchNamespaceQuotas(rsServices.getConfiguration(), rsServices.getConnection(),
+      machineQuotaFactor);
   }
 
   /**
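For context on the FixedIntervalRateLimiter change: instead of rejecting a refill interval larger than the quota's time unit with a Preconditions check, the constructor now logs a warning and clamps the interval to the time unit. A minimal standalone sketch of that clamping behaviour (plain Java, not the HBase class itself; the 1000 ms time unit is just an assumed example value of getTimeUnitInMillis()):

```java
// Illustrative only: mirrors the new clamping logic, it is not the actual HBase class.
public class RefillIntervalClampSketch {
  public static void main(String[] args) {
    long timeUnitMs = 1000L;            // assumed time unit (1 second)
    long configuredRefillMs = 300_000L; // e.g. hbase.quota.rate.limiter.refill.interval.ms

    // Previously an interval larger than the time unit threw IllegalArgumentException;
    // now it is only logged and then clamped down to the time unit.
    long effectiveRefillMs = Math.min(timeUnitMs, configuredRefillMs);
    System.out.println("effective refill interval = " + effectiveRefillMs + " ms"); // prints 1000
  }
}
```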
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaLimiterFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaLimiterFactory.java
index 762896773fc7..63d8df65d25d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaLimiterFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaLimiterFactory.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.quotas;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
 
@@ -25,8 +26,8 @@
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class QuotaLimiterFactory {
-  public static QuotaLimiter fromThrottle(final Throttle throttle) {
-    return TimeBasedLimiter.fromThrottle(throttle);
+  public static QuotaLimiter fromThrottle(Configuration conf, final Throttle throttle) {
+    return TimeBasedLimiter.fromThrottle(conf, throttle);
   }
 
   public static QuotaLimiter update(final QuotaLimiter a, final QuotaLimiter b) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaState.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaState.java
index 61aa9d7f068f..4a0b634abec5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaState.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaState.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.quotas;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
 
@@ -57,9 +58,9 @@ public synchronized boolean isBypass() {
   /**
    * Setup the global quota information. (This operation is part of the QuotaState setup)
    */
-  public synchronized void setQuotas(final Quotas quotas) {
+  public synchronized void setQuotas(Configuration conf, final Quotas quotas) {
     if (quotas.hasThrottle()) {
-      globalLimiter = QuotaLimiterFactory.fromThrottle(quotas.getThrottle());
+      globalLimiter = QuotaLimiterFactory.fromThrottle(conf, quotas.getThrottle());
     } else {
       globalLimiter = NoopQuotaLimiter.get();
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java
index d49ce248916c..8497f861f70c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java
@@ -330,8 +330,9 @@ private static void deleteQuotas(final Connection connection, final byte[] rowKe
     doDelete(connection, delete);
   }
 
-  public static Map<String, UserQuotaState> fetchUserQuotas(final Connection connection,
-    Map<TableName, Double> tableMachineQuotaFactors, double factor) throws IOException {
+  public static Map<String, UserQuotaState> fetchUserQuotas(final Configuration conf,
+    final Connection connection, Map<TableName, Double> tableMachineQuotaFactors, double factor)
+    throws IOException {
     Map<String, UserQuotaState> userQuotas = new HashMap<>();
     try (Table table = connection.getTable(QUOTA_TABLE_NAME)) {
       Scan scan = new Scan();
@@ -351,7 +352,7 @@ public static Map fetchUserQuotas(final Connection conne
         @Override
         public void visitUserQuotas(String userName, String namespace, Quotas quotas) {
           quotas = updateClusterQuotaToMachineQuota(quotas, factor);
-          quotaInfo.setQuotas(namespace, quotas);
+          quotaInfo.setQuotas(conf, namespace, quotas);
         }
 
         @Override
@@ -360,13 +361,13 @@ public void visitUserQuotas(String userName, TableName table, Quotas quotas) {
             tableMachineQuotaFactors.containsKey(table) ? tableMachineQuotaFactors.get(table) : 1);
-          quotaInfo.setQuotas(table, quotas);
+          quotaInfo.setQuotas(conf, table, quotas);
         }
 
         @Override
         public void visitUserQuotas(String userName, Quotas quotas) {
           quotas = updateClusterQuotaToMachineQuota(quotas, factor);
-          quotaInfo.setQuotas(quotas);
+          quotaInfo.setQuotas(conf, quotas);
         }
       });
     } catch (IOException e) {
@@ -407,7 +408,7 @@ protected static UserQuotaState buildDefaultUserQuotaState(Configuration conf) {
     UserQuotaState state = new UserQuotaState();
     QuotaProtos.Quotas defaultQuotas =
       QuotaProtos.Quotas.newBuilder().setThrottle(throttleBuilder.build()).build();
-    state.setQuotas(defaultQuotas);
+    state.setQuotas(conf, defaultQuotas);
     return state;
   }
 
@@ -420,12 +421,12 @@ private static Optional buildDefaultTimedQuota(Configuration conf, S
       java.util.concurrent.TimeUnit.SECONDS, org.apache.hadoop.hbase.quotas.QuotaScope.MACHINE));
   }
 
-  public static Map<TableName, QuotaState> fetchTableQuotas(final Connection connection,
-    Map<TableName, Double> tableMachineFactors) throws IOException {
+  public static Map<TableName, QuotaState> fetchTableQuotas(final Configuration conf,
+    final Connection connection, Map<TableName, Double> tableMachineFactors) throws IOException {
     Scan scan = new Scan();
     scan.addFamily(QUOTA_FAMILY_INFO);
     scan.setStartStopRowForPrefixScan(QUOTA_TABLE_ROW_KEY_PREFIX);
-    return fetchGlobalQuotas("table", scan, connection, new KeyFromRow<TableName>() {
+    return fetchGlobalQuotas(conf, "table", scan, connection, new KeyFromRow<TableName>() {
       @Override
       public TableName getKeyFromRow(final byte[] row) {
         assert isTableRowKey(row);
@@ -439,12 +440,12 @@ public double getFactor(TableName tableName) {
     });
   }
 
-  public static Map<String, QuotaState> fetchNamespaceQuotas(final Connection connection,
-    double factor) throws IOException {
+  public static Map<String, QuotaState> fetchNamespaceQuotas(final Configuration conf,
+    final Connection connection, double factor) throws IOException {
     Scan scan = new Scan();
     scan.addFamily(QUOTA_FAMILY_INFO);
     scan.setStartStopRowForPrefixScan(QUOTA_NAMESPACE_ROW_KEY_PREFIX);
-    return fetchGlobalQuotas("namespace", scan, connection, new KeyFromRow<String>() {
+    return fetchGlobalQuotas(conf, "namespace", scan, connection, new KeyFromRow<String>() {
       @Override
       public String getKeyFromRow(final byte[] row) {
         assert isNamespaceRowKey(row);
@@ -458,12 +459,12 @@ public double getFactor(String s) {
     });
   }
 
-  public static Map<String, QuotaState> fetchRegionServerQuotas(final Connection connection)
-    throws IOException {
+  public static Map<String, QuotaState> fetchRegionServerQuotas(final Configuration conf,
+    final Connection connection) throws IOException {
     Scan scan = new Scan();
     scan.addFamily(QUOTA_FAMILY_INFO);
     scan.setStartStopRowForPrefixScan(QUOTA_REGION_SERVER_ROW_KEY_PREFIX);
-    return fetchGlobalQuotas("regionServer", scan, connection, new KeyFromRow<String>() {
+    return fetchGlobalQuotas(conf, "regionServer", scan, connection, new KeyFromRow<String>() {
       @Override
       public String getKeyFromRow(final byte[] row) {
         assert isRegionServerRowKey(row);
@@ -477,8 +478,9 @@ public double getFactor(String s) {
     });
   }
 
-  public static <K> Map<K, QuotaState> fetchGlobalQuotas(final String type, final Scan scan,
-    final Connection connection, final KeyFromRow<K> kfr) throws IOException {
+  public static <K> Map<K, QuotaState> fetchGlobalQuotas(final Configuration conf,
+    final String type, final Scan scan, final Connection connection, final KeyFromRow<K> kfr)
+    throws IOException {
     Map<K, QuotaState> globalQuotas = new HashMap<>();
     try (Table table = connection.getTable(QUOTA_TABLE_NAME)) {
@@ -499,7 +501,7 @@ public static Map fetchGlobalQuotas(final String type, final
         try {
           Quotas quotas = quotasFromData(data);
           quotas = updateClusterQuotaToMachineQuota(quotas, kfr.getFactor(key));
-          quotaInfo.setQuotas(quotas);
+          quotaInfo.setQuotas(conf, quotas);
         } catch (IOException e) {
           LOG.error("Unable to parse {} '{}' quotas", type, key, e);
           globalQuotas.remove(key);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TimeBasedLimiter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TimeBasedLimiter.java
index 232471092c29..43dfab703b74 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TimeBasedLimiter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TimeBasedLimiter.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hbase.quotas;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
 
@@ -32,7 +31,6 @@
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class TimeBasedLimiter implements QuotaLimiter {
-  private static final Configuration conf = HBaseConfiguration.create();
   private RateLimiter reqsLimiter = null;
   private RateLimiter reqSizeLimiter = null;
   private RateLimiter writeReqsLimiter = null;
@@ -47,7 +45,7 @@ public class TimeBasedLimiter implements QuotaLimiter {
   private RateLimiter atomicWriteSizeLimiter = null;
   private RateLimiter reqHandlerUsageTimeLimiter = null;
 
-  private TimeBasedLimiter() {
+  private TimeBasedLimiter(Configuration conf) {
     if (
       FixedIntervalRateLimiter.class.getName().equals(
         conf.getClass(RateLimiter.QUOTA_RATE_LIMITER_CONF_KEY, AverageIntervalRateLimiter.class)
@@ -85,8 +83,8 @@ private TimeBasedLimiter() {
     }
   }
 
-  static QuotaLimiter fromThrottle(final Throttle throttle) {
-    TimeBasedLimiter limiter = new TimeBasedLimiter();
+  static QuotaLimiter fromThrottle(Configuration conf, final Throttle throttle) {
+    TimeBasedLimiter limiter = new TimeBasedLimiter(conf);
     boolean isBypass = true;
     if (throttle.hasReqNum()) {
       setFromTimedQuota(limiter.reqsLimiter, throttle.getReqNum());
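With the static HBaseConfiguration.create() removed from TimeBasedLimiter, a limiter is now always built against an explicitly supplied Configuration, so the rate limiter implementation and refill interval come from the caller's configuration. A rough usage sketch, not part of the patch, assuming the same QuotaProtos/HBaseProtos imports the tests below use:

```java
Configuration conf = HBaseConfiguration.create();
// The key TimeBasedLimiter's constructor reads (RateLimiter.QUOTA_RATE_LIMITER_CONF_KEY).
conf.set("hbase.quota.rate.limiter", "org.apache.hadoop.hbase.quotas.FixedIntervalRateLimiter");
conf.setLong("hbase.quota.rate.limiter.refill.interval.ms", 100L); // clamped to the time unit if too large

QuotaProtos.Throttle throttle = QuotaProtos.Throttle.newBuilder()
  .setReadNum(QuotaProtos.TimedQuota.newBuilder().setSoftLimit(10)
    .setTimeUnit(HBaseProtos.TimeUnit.SECONDS).build())
  .build();

// Public entry point; TimeBasedLimiter.fromThrottle(conf, throttle) is the package-private equivalent.
QuotaLimiter limiter = QuotaLimiterFactory.fromThrottle(conf, throttle);
```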
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/UserQuotaState.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/UserQuotaState.java
index 877ad195c716..0704e869239b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/UserQuotaState.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/UserQuotaState.java
@@ -21,6 +21,7 @@
 import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
@@ -89,8 +90,8 @@ public synchronized boolean hasBypassGlobals() {
   }
 
   @Override
-  public synchronized void setQuotas(final Quotas quotas) {
-    super.setQuotas(quotas);
+  public synchronized void setQuotas(Configuration conf, final Quotas quotas) {
+    super.setQuotas(conf, quotas);
     bypassGlobals = quotas.getBypassGlobals();
   }
 
@@ -98,30 +99,30 @@ public synchronized void setQuotas(final Quotas quotas) {
    * Add the quota information of the specified table. (This operation is part of the QuotaState
    * setup)
    */
-  public synchronized void setQuotas(final TableName table, Quotas quotas) {
-    tableLimiters = setLimiter(tableLimiters, table, quotas);
+  public synchronized void setQuotas(Configuration conf, final TableName table, Quotas quotas) {
+    tableLimiters = setLimiter(conf, tableLimiters, table, quotas);
   }
 
   /**
    * Add the quota information of the specified namespace. (This operation is part of the QuotaState
    * setup)
    */
-  public void setQuotas(final String namespace, Quotas quotas) {
-    namespaceLimiters = setLimiter(namespaceLimiters, namespace, quotas);
+  public void setQuotas(Configuration conf, final String namespace, Quotas quotas) {
+    namespaceLimiters = setLimiter(conf, namespaceLimiters, namespace, quotas);
   }
 
   public boolean hasTableLimiters() {
     return tableLimiters != null && !tableLimiters.isEmpty();
   }
 
-  private <K> Map<K, QuotaLimiter> setLimiter(Map<K, QuotaLimiter> limiters, final K key,
-    final Quotas quotas) {
+  private <K> Map<K, QuotaLimiter> setLimiter(Configuration conf, Map<K, QuotaLimiter> limiters,
+    final K key, final Quotas quotas) {
     if (limiters == null) {
       limiters = new HashMap<>();
     }
     QuotaLimiter limiter =
-      quotas.hasThrottle() ? QuotaLimiterFactory.fromThrottle(quotas.getThrottle()) : null;
+      quotas.hasThrottle() ? QuotaLimiterFactory.fromThrottle(conf, quotas.getThrottle()) : null;
     if (limiter != null && !limiter.isBypass()) {
       limiters.put(key, limiter);
     } else {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionCoprocessorQuotaUsage.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionCoprocessorQuotaUsage.java
index 4cb6260e3be4..029f26c9eb7b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionCoprocessorQuotaUsage.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionCoprocessorQuotaUsage.java
@@ -43,6 +43,8 @@
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @Category({ MediumTests.class, CoprocessorTests.class })
 public class TestRegionCoprocessorQuotaUsage {
@@ -51,6 +53,8 @@ public class TestRegionCoprocessorQuotaUsage {
   public static final HBaseClassTestRule CLASS_RULE =
     HBaseClassTestRule.forClass(TestRegionCoprocessorQuotaUsage.class);
 
+  private static final Logger LOG = LoggerFactory.getLogger(TestRegionCoprocessorQuotaUsage.class);
+
   private static HBaseTestingUtil UTIL = new HBaseTestingUtil();
   private static TableName TABLE_NAME = TableName.valueOf("TestRegionCoprocessorQuotaUsage");
   private static byte[] CF = Bytes.toBytes("CF");
@@ -66,11 +70,14 @@ public void preGetOp(ObserverContext c,
       // For the purposes of this test, we only need to catch a throttle happening once, then
       // let future requests pass through so we don't make this test take any longer than necessary
+      LOG.info("Intercepting GetOp");
       if (!THROTTLING_OCCURRED.get()) {
         try {
           c.getEnvironment().checkBatchQuota(c.getEnvironment().getRegion(),
             OperationQuota.OperationType.GET);
+          LOG.info("Request was not throttled");
         } catch (RpcThrottlingException e) {
+          LOG.info("Intercepting was throttled");
           THROTTLING_OCCURRED.set(true);
           throw e;
         }
@@ -91,9 +98,8 @@ public Optional getRegionObserver() {
   public static void setUp() throws Exception {
     Configuration conf = UTIL.getConfiguration();
     conf.setBoolean("hbase.quota.enabled", true);
-    conf.setInt("hbase.quota.default.user.machine.read.num", 2);
+    conf.setInt("hbase.quota.default.user.machine.read.num", 1);
     conf.set("hbase.quota.rate.limiter", "org.apache.hadoop.hbase.quotas.FixedIntervalRateLimiter");
-    conf.set("hbase.quota.rate.limiter.refill.interval.ms", "300000");
     conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, MyCoprocessor.class.getName());
     UTIL.startMiniCluster(3);
     byte[][] splitKeys = new byte[8][];
@@ -116,6 +122,9 @@ public void testGet() throws InterruptedException, ExecutionException, IOExcepti
     // Hit the table 5 times which ought to be enough to make a throttle happen
     for (int i = 0; i < 5; i++) {
       TABLE.get(new Get(Bytes.toBytes("000")));
+      if (THROTTLING_OCCURRED.get()) {
+        break;
+      }
     }
     assertTrue("Throttling did not happen as expected", THROTTLING_OCCURRED.get());
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestDefaultOperationQuota.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestDefaultOperationQuota.java
index c22a03f8db00..2b9200ab6465 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestDefaultOperationQuota.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestDefaultOperationQuota.java
@@ -23,6 +23,7 @@
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -41,6 +42,7 @@ public class TestDefaultOperationQuota {
   public static final HBaseClassTestRule CLASS_RULE =
     HBaseClassTestRule.forClass(TestDefaultOperationQuota.class);
 
+  private static final Configuration conf = HBaseConfiguration.create();
   private static final int DEFAULT_REQUESTS_PER_SECOND = 1000;
   private static ManualEnvironmentEdge envEdge = new ManualEnvironmentEdge();
   static {
@@ -150,7 +152,7 @@ public void testLargeBatchSaturatesReadNumLimit()
     QuotaProtos.Throttle throttle =
       QuotaProtos.Throttle.newBuilder().setReadNum(QuotaProtos.TimedQuota.newBuilder()
        .setSoftLimit(limit).setTimeUnit(HBaseProtos.TimeUnit.SECONDS).build()).build();
-    QuotaLimiter limiter = TimeBasedLimiter.fromThrottle(throttle);
+    QuotaLimiter limiter = TimeBasedLimiter.fromThrottle(conf, throttle);
     DefaultOperationQuota quota = new DefaultOperationQuota(new Configuration(), 65536,
       DEFAULT_REQUESTS_PER_SECOND, limiter);
 
@@ -172,7 +174,7 @@ public void testLargeBatchSaturatesReadWriteLimit()
     QuotaProtos.Throttle throttle =
       QuotaProtos.Throttle.newBuilder().setWriteNum(QuotaProtos.TimedQuota.newBuilder()
        .setSoftLimit(limit).setTimeUnit(HBaseProtos.TimeUnit.SECONDS).build()).build();
-    QuotaLimiter limiter = TimeBasedLimiter.fromThrottle(throttle);
+    QuotaLimiter limiter = TimeBasedLimiter.fromThrottle(conf, throttle);
     DefaultOperationQuota quota = new DefaultOperationQuota(new Configuration(), 65536,
       DEFAULT_REQUESTS_PER_SECOND, limiter);
 
@@ -194,7 +196,7 @@ public void testTooLargeReadBatchIsNotBlocked()
     QuotaProtos.Throttle throttle =
       QuotaProtos.Throttle.newBuilder().setReadNum(QuotaProtos.TimedQuota.newBuilder()
        .setSoftLimit(limit).setTimeUnit(HBaseProtos.TimeUnit.SECONDS).build()).build();
-    QuotaLimiter limiter = TimeBasedLimiter.fromThrottle(throttle);
+    QuotaLimiter limiter = TimeBasedLimiter.fromThrottle(conf, throttle);
     DefaultOperationQuota quota = new DefaultOperationQuota(new Configuration(), 65536,
       DEFAULT_REQUESTS_PER_SECOND, limiter);
 
@@ -216,7 +218,7 @@ public void testTooLargeWriteBatchIsNotBlocked()
     QuotaProtos.Throttle throttle =
      QuotaProtos.Throttle.newBuilder().setWriteNum(QuotaProtos.TimedQuota.newBuilder()
        .setSoftLimit(limit).setTimeUnit(HBaseProtos.TimeUnit.SECONDS).build()).build();
-    QuotaLimiter limiter = TimeBasedLimiter.fromThrottle(throttle);
+    QuotaLimiter limiter = TimeBasedLimiter.fromThrottle(conf, throttle);
     DefaultOperationQuota quota = new DefaultOperationQuota(new Configuration(), 65536,
       DEFAULT_REQUESTS_PER_SECOND, limiter);
 
@@ -238,7 +240,7 @@ public void testTooLargeWriteSizeIsNotBlocked()
     QuotaProtos.Throttle throttle =
       QuotaProtos.Throttle.newBuilder().setWriteSize(QuotaProtos.TimedQuota.newBuilder()
        .setSoftLimit(limit).setTimeUnit(HBaseProtos.TimeUnit.SECONDS).build()).build();
-    QuotaLimiter limiter = TimeBasedLimiter.fromThrottle(throttle);
+    QuotaLimiter limiter = TimeBasedLimiter.fromThrottle(conf, throttle);
     DefaultOperationQuota quota = new DefaultOperationQuota(new Configuration(), 65536,
       DEFAULT_REQUESTS_PER_SECOND, limiter);
 
@@ -261,7 +263,7 @@ public void testTooLargeReadSizeIsNotBlocked()
     QuotaProtos.Throttle throttle =
       QuotaProtos.Throttle.newBuilder().setReadSize(QuotaProtos.TimedQuota.newBuilder()
        .setSoftLimit(limit).setTimeUnit(HBaseProtos.TimeUnit.SECONDS).build()).build();
-    QuotaLimiter limiter = TimeBasedLimiter.fromThrottle(throttle);
+    QuotaLimiter limiter = TimeBasedLimiter.fromThrottle(conf, throttle);
     DefaultOperationQuota quota = new DefaultOperationQuota(new Configuration(), (int) blockSize,
       DEFAULT_REQUESTS_PER_SECOND, limiter);
 
@@ -284,7 +286,7 @@ public void testTooLargeRequestSizeIsNotBlocked()
     QuotaProtos.Throttle throttle =
       QuotaProtos.Throttle.newBuilder().setReqSize(QuotaProtos.TimedQuota.newBuilder()
        .setSoftLimit(limit).setTimeUnit(HBaseProtos.TimeUnit.SECONDS).build()).build();
-    QuotaLimiter limiter = TimeBasedLimiter.fromThrottle(throttle);
+    QuotaLimiter limiter = TimeBasedLimiter.fromThrottle(conf, throttle);
     DefaultOperationQuota quota = new DefaultOperationQuota(new Configuration(), (int) blockSize,
       DEFAULT_REQUESTS_PER_SECOND, limiter);
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaCache2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaCache2.java
index 2c33b265771a..8f8ac4991ca6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaCache2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaCache2.java
@@ -23,7 +23,9 @@
 import java.util.HashMap;
 import java.util.Map;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.junit.ClassRule;
@@ -43,6 +45,8 @@ public class TestQuotaCache2 {
   public static final HBaseClassTestRule CLASS_RULE =
     HBaseClassTestRule.forClass(TestQuotaCache2.class);
 
+  private static final Configuration conf = HBaseConfiguration.create();
+
   @Test
   public void testPreserveLimiterAvailability() throws Exception {
     // establish old cache with a limiter for 100 read bytes per second
@@ -53,7 +57,7 @@ public void testPreserveLimiterAvailability() throws Exception {
       .setReadSize(QuotaProtos.TimedQuota.newBuilder().setTimeUnit(HBaseProtos.TimeUnit.SECONDS)
         .setSoftLimit(100).setScope(QuotaProtos.QuotaScope.MACHINE).build())
       .build();
-    QuotaLimiter limiter1 = TimeBasedLimiter.fromThrottle(throttle1);
+    QuotaLimiter limiter1 = TimeBasedLimiter.fromThrottle(conf, throttle1);
     oldState.setGlobalLimiter(limiter1);
 
     // consume one byte from the limiter, so 99 will be left
@@ -67,7 +71,7 @@ public void testPreserveLimiterAvailability() throws Exception {
       .setReadSize(QuotaProtos.TimedQuota.newBuilder().setTimeUnit(HBaseProtos.TimeUnit.SECONDS)
         .setSoftLimit(100).setScope(QuotaProtos.QuotaScope.MACHINE).build())
      .build();
-    QuotaLimiter limiter2 = TimeBasedLimiter.fromThrottle(throttle2);
+    QuotaLimiter limiter2 = TimeBasedLimiter.fromThrottle(conf, throttle2);
     newState.setGlobalLimiter(limiter2);
 
     // update new cache from old cache
@@ -89,7 +93,7 @@ public void testClobberLimiterLimit() throws Exception {
       .setReadSize(QuotaProtos.TimedQuota.newBuilder().setTimeUnit(HBaseProtos.TimeUnit.SECONDS)
        .setSoftLimit(100).setScope(QuotaProtos.QuotaScope.MACHINE).build())
       .build();
-    QuotaLimiter limiter1 = TimeBasedLimiter.fromThrottle(throttle1);
+    QuotaLimiter limiter1 = TimeBasedLimiter.fromThrottle(conf, throttle1);
     oldState.setGlobalLimiter(limiter1);
 
     // establish new cache, also with a limiter for 100 read bytes per second
@@ -100,7 +104,7 @@ public void testClobberLimiterLimit() throws Exception {
       .setReadSize(QuotaProtos.TimedQuota.newBuilder().setTimeUnit(HBaseProtos.TimeUnit.SECONDS)
        .setSoftLimit(50).setScope(QuotaProtos.QuotaScope.MACHINE).build())
       .build();
-    QuotaLimiter limiter2 = TimeBasedLimiter.fromThrottle(throttle2);
+    QuotaLimiter limiter2 = TimeBasedLimiter.fromThrottle(conf, throttle2);
     newState.setGlobalLimiter(limiter2);
 
     // update new cache from old cache
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaState.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaState.java
index ff4b6bc9949b..b45f78b07653 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaState.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaState.java
@@ -22,7 +22,9 @@
 import static org.junit.Assert.fail;
 
 import java.util.concurrent.TimeUnit;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
@@ -48,6 +50,8 @@ public class TestQuotaState {
   @Rule
   public TestName name = new TestName();
 
+  private static final Configuration conf = HBaseConfiguration.create();
+
   @Test
   public void testQuotaStateBypass() {
     QuotaState quotaInfo = new QuotaState();
@@ -69,11 +73,11 @@ public void testSimpleQuotaStateOperation() {
     assertTrue(quotaInfo.isBypass());
 
     // Set global quota
-    quotaInfo.setQuotas(buildReqNumThrottle(NUM_GLOBAL_THROTTLE));
+    quotaInfo.setQuotas(conf, buildReqNumThrottle(NUM_GLOBAL_THROTTLE));
     assertFalse(quotaInfo.isBypass());
 
     // Set table quota
-    quotaInfo.setQuotas(tableName, buildReqNumThrottle(NUM_TABLE_THROTTLE));
+    quotaInfo.setQuotas(conf, tableName, buildReqNumThrottle(NUM_TABLE_THROTTLE));
     assertFalse(quotaInfo.isBypass());
     assertTrue(quotaInfo.getGlobalLimiter() == quotaInfo.getTableLimiter(UNKNOWN_TABLE_NAME));
     assertThrottleException(quotaInfo.getTableLimiter(UNKNOWN_TABLE_NAME), NUM_GLOBAL_THROTTLE);
@@ -90,7 +94,7 @@ public void testQuotaStateUpdateGlobalThrottle() {
 
     // Add global throttle
     QuotaState otherQuotaState = new QuotaState();
-    otherQuotaState.setQuotas(buildReqNumThrottle(NUM_GLOBAL_THROTTLE_1));
+    otherQuotaState.setQuotas(conf, buildReqNumThrottle(NUM_GLOBAL_THROTTLE_1));
     assertFalse(otherQuotaState.isBypass());
 
     quotaInfo.update(otherQuotaState);
@@ -99,7 +103,7 @@ public void testQuotaStateUpdateGlobalThrottle() {
 
     // Update global Throttle
     otherQuotaState = new QuotaState();
-    otherQuotaState.setQuotas(buildReqNumThrottle(NUM_GLOBAL_THROTTLE_2));
+    otherQuotaState.setQuotas(conf, buildReqNumThrottle(NUM_GLOBAL_THROTTLE_2));
     assertFalse(otherQuotaState.isBypass());
 
     quotaInfo.update(otherQuotaState);
@@ -131,8 +135,8 @@ public void testQuotaStateUpdateTableThrottle() {
 
     // Add A B table limiters
     UserQuotaState otherQuotaState = new UserQuotaState();
-    otherQuotaState.setQuotas(tableNameA, buildReqNumThrottle(TABLE_A_THROTTLE_1));
-    otherQuotaState.setQuotas(tableNameB, buildReqNumThrottle(TABLE_B_THROTTLE));
+    otherQuotaState.setQuotas(conf, tableNameA, buildReqNumThrottle(TABLE_A_THROTTLE_1));
+    otherQuotaState.setQuotas(conf, tableNameB, buildReqNumThrottle(TABLE_B_THROTTLE));
     assertFalse(otherQuotaState.isBypass());
 
     quotaInfo.update(otherQuotaState);
@@ -143,8 +147,8 @@ public void testQuotaStateUpdateTableThrottle() {
 
     // Add C, Remove B, Update A table limiters
     otherQuotaState = new UserQuotaState();
-    otherQuotaState.setQuotas(tableNameA, buildReqNumThrottle(TABLE_A_THROTTLE_2));
-    otherQuotaState.setQuotas(tableNameC, buildReqNumThrottle(TABLE_C_THROTTLE));
+    otherQuotaState.setQuotas(conf, tableNameA, buildReqNumThrottle(TABLE_A_THROTTLE_2));
+    otherQuotaState.setQuotas(conf, tableNameC, buildReqNumThrottle(TABLE_C_THROTTLE));
     assertFalse(otherQuotaState.isBypass());
 
     quotaInfo.update(otherQuotaState);
@@ -173,7 +177,7 @@ public void testTableThrottleWithBatch() {
 
     // Add A table limiters
     UserQuotaState otherQuotaState = new UserQuotaState();
-    otherQuotaState.setQuotas(TABLE_A, buildReqNumThrottle(TABLE_A_THROTTLE_1));
+    otherQuotaState.setQuotas(conf, TABLE_A, buildReqNumThrottle(TABLE_A_THROTTLE_1));
     assertFalse(otherQuotaState.isBypass());
 
     quotaInfo.update(otherQuotaState);