diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java
index 4ac1d7c6396f..9666bc0a99ae 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java
@@ -36,6 +36,7 @@ import java.util.concurrent.atomic.AtomicInteger;
 import java.util.function.IntSupplier;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.conf.ConfigurationManager;
 import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
@@ -344,6 +345,11 @@ protected void requestCompactionInternal(HRegion region, HStore store, String wh
       return;
     }

+    if (isReadOnlyEnabled()) {
+      LOG.info("Ignoring compaction request for " + region + ", because read-only mode is on.");
+      return;
+    }
+
     if (
       this.server.isStopped()
         || (region.getTableDescriptor() != null && !region.getTableDescriptor().isCompactionEnabled())
@@ -442,6 +448,13 @@ private Optional<CompactionContext> selectCompaction(HRegion region, HStore stor
       LOG.info(String.format("User has disabled compactions"));
       return Optional.empty();
     }
+
+    // Should not allow compaction if cluster is in read-only mode
+    if (isReadOnlyEnabled()) {
+      LOG.info(String.format("Compaction request skipped as read-only mode is on"));
+      return Optional.empty();
+    }
+
     Optional<CompactionContext> compaction = store.requestCompaction(priority, tracker, user);
     if (!compaction.isPresent() && region.getRegionInfo() != null) {
       String reason = "Not compacting " + region.getRegionInfo().getRegionNameAsString()
@@ -856,6 +869,11 @@ public boolean isCompactionsEnabled() {
     return compactionsEnabled;
   }

+  private boolean isReadOnlyEnabled() {
+    return conf.getBoolean(HConstants.HBASE_GLOBAL_READONLY_ENABLED_KEY,
+      HConstants.HBASE_GLOBAL_READONLY_ENABLED_DEFAULT);
+  }
+
   public void setCompactionsEnabled(boolean compactionsEnabled) {
     this.compactionsEnabled = compactionsEnabled;
     this.conf.setBoolean(HBASE_REGION_SERVER_ENABLE_COMPACTION, compactionsEnabled);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerBase.java
index bc24ad579444..29d8e0bcb485 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerBase.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerBase.java
@@ -143,7 +143,7 @@ private HFileContext createFileContext(Compression.Algorithm compression,
   public final StoreFileWriter createWriter(CreateStoreFileWriterParams params) throws IOException {
     if (!isPrimaryReplica || isReadOnlyEnabled()) {
       throw new IllegalStateException(
-        "Should not call create writer on secondary replicas or in read only mode");
+        "Should not call create writer on secondary replicas or in read-only mode");
     }
     // creating new cache config for each new writer
     final CacheConfig cacheConf = ctx.getCacheConf();
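
The CompactSplit changes re-read the read-only flag from the live `Configuration` at both call sites, `requestCompactionInternal()` and `selectCompaction()`, so a request queued before the flag flipped is still dropped at selection time. Below is a minimal, self-contained sketch of that guard pattern (not HBase code); the property string `hbase.global.readonly.enabled` is an assumption, since the diff only exposes the flag through `HConstants.HBASE_GLOBAL_READONLY_ENABLED_KEY` / `HBASE_GLOBAL_READONLY_ENABLED_DEFAULT`.

```java
import org.apache.hadoop.conf.Configuration;

// Sketch of the guard added to CompactSplit.requestCompactionInternal() and
// selectCompaction(). The property name is assumed; the real key lives behind
// HConstants.HBASE_GLOBAL_READONLY_ENABLED_KEY, whose value is not shown here.
public class ReadOnlyGuardSketch {

  private final Configuration conf;

  public ReadOnlyGuardSketch(Configuration conf) {
    this.conf = conf;
  }

  // Mirrors CompactSplit.isReadOnlyEnabled(): consult the Configuration on each
  // call rather than caching the boolean at construction time.
  private boolean isReadOnlyEnabled() {
    return conf.getBoolean("hbase.global.readonly.enabled", false);
  }

  public void requestCompaction(String region) {
    if (isReadOnlyEnabled()) {
      System.out.println("Ignoring compaction request for " + region
        + ", because read-only mode is on.");
      return;
    }
    System.out.println("Compaction queued for " + region);
  }

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setBoolean("hbase.global.readonly.enabled", true);
    new ReadOnlyGuardSketch(conf).requestCompaction("test-region");
  }
}
```
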
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ReadOnlyController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ReadOnlyController.java
index 5b7ab67df0bf..7bd16d10ef31 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ReadOnlyController.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ReadOnlyController.java
@@ -53,6 +53,9 @@ import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.regionserver.FlushLifeCycleTracker;
 import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
+import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -81,6 +84,7 @@ private void internalReadOnlyGuard() throws IOException {

   @Override
   public void start(CoprocessorEnvironment env) throws IOException {
+    this.globalReadOnlyEnabled = env.getConfiguration().getBoolean(HConstants.HBASE_GLOBAL_READONLY_ENABLED_KEY, HConstants.HBASE_GLOBAL_READONLY_ENABLED_DEFAULT);
@@ -131,6 +135,13 @@ public void preFlush(final ObserverContext<RegionCoprocessorEnvironment> c,
+  @Override
+  public void preCompactSelection(ObserverContext<RegionCoprocessorEnvironment> c,
+    Store store, List<? extends StoreFile> candidates, CompactionLifeCycleTracker tracker)
+    throws IOException {
+    internalReadOnlyGuard();
+  }
+
   @Override
   public boolean preCheckAndPut(ObserverContext<RegionCoprocessorEnvironment> c, byte[] row,
     byte[] family, byte[] qualifier, CompareOperator op, ByteArrayComparable comparator,
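
For reviewers who want to exercise the new hook in isolation, here is a hedged, standalone sketch of a RegionObserver that blocks compaction selection in the same spirit as ReadOnlyController: the hook rejects the operation with an IOException while a read-only flag is set. The class name and the property string are assumptions; ReadOnlyController itself reads the flag via `HConstants.HBASE_GLOBAL_READONLY_ENABLED_KEY` and funnels its hooks through `internalReadOnlyGuard()`.

```java
import java.io.IOException;
import java.util.List;
import java.util.Optional;

import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;

/**
 * Standalone sketch (not the PR's code) of the preCompactSelection hook added to
 * ReadOnlyController. The property name "hbase.global.readonly.enabled" is assumed;
 * the PR reads it through HConstants.HBASE_GLOBAL_READONLY_ENABLED_KEY.
 */
public class ReadOnlyCompactionGuard implements RegionCoprocessor, RegionObserver {

  private volatile boolean readOnly;

  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);
  }

  @Override
  public void start(CoprocessorEnvironment env) throws IOException {
    // Cache the flag at startup, as ReadOnlyController.start() now does.
    readOnly = env.getConfiguration().getBoolean("hbase.global.readonly.enabled", false);
  }

  @Override
  public void preCompactSelection(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
    List<? extends StoreFile> candidates, CompactionLifeCycleTracker tracker) throws IOException {
    if (readOnly) {
      // Reject the operation, analogous to the PR's internalReadOnlyGuard().
      throw new IOException("Compaction selection rejected: cluster is in read-only mode");
    }
  }
}
```

Wiring the sketch up is the usual coprocessor configuration, e.g. adding the class to `hbase.coprocessor.region.classes` in hbase-site.xml on the region servers.
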