diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java
index 47b36a7a1516..b029d0288564 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java
@@ -138,4 +138,10 @@ default String getNameAsString() {
 
   /** Returns the compaction state of this region */
   CompactionState getCompactionState();
+
+  /** Returns the total size of the hfiles in the region */
+  Size getRegionSizeMB();
+
+  /** Returns the current cached ratio of this region on this server */
+  float getCurrentRegionCachedRatio();
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java
index 43b3a17aac17..d3361693079a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java
@@ -80,7 +80,8 @@ public static RegionMetrics toRegionMetrics(ClusterStatusProtos.RegionLoad regio
           ClusterStatusProtos.StoreSequenceId::getSequenceId)))
       .setUncompressedStoreFileSize(
         new Size(regionLoadPB.getStoreUncompressedSizeMB(), Size.Unit.MEGABYTE))
-      .build();
+      .setRegionSizeMB(new Size(regionLoadPB.getRegionSizeMB(), Size.Unit.MEGABYTE))
+      .setCurrentRegionCachedRatio(regionLoadPB.getCurrentRegionCachedRatio()).build();
   }
 
   private static List<ClusterStatusProtos.StoreSequenceId>
@@ -120,7 +121,8 @@ public static ClusterStatusProtos.RegionLoad toRegionLoad(RegionMetrics regionMe
       .addAllStoreCompleteSequenceId(toStoreSequenceId(regionMetrics.getStoreSequenceId()))
       .setStoreUncompressedSizeMB(
         (int) regionMetrics.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE))
-      .build();
+      .setRegionSizeMB((int) regionMetrics.getRegionSizeMB().get(Size.Unit.MEGABYTE))
+      .setCurrentRegionCachedRatio(regionMetrics.getCurrentRegionCachedRatio()).build();
   }
 
   public static RegionMetricsBuilder newBuilder(byte[] name) {
@@ -154,6 +156,8 @@ public static RegionMetricsBuilder newBuilder(byte[] name) {
   private long blocksLocalWithSsdWeight;
   private long blocksTotalWeight;
   private CompactionState compactionState;
+  private Size regionSizeMB = Size.ZERO;
+  private float currentRegionCachedRatio;
 
   private RegionMetricsBuilder(byte[] name) {
     this.name = name;
@@ -289,6 +293,16 @@ public RegionMetricsBuilder setCompactionState(CompactionState compactionState)
     return this;
   }
 
+  public RegionMetricsBuilder setRegionSizeMB(Size value) {
+    this.regionSizeMB = value;
+    return this;
+  }
+
+  public RegionMetricsBuilder setCurrentRegionCachedRatio(float value) {
+    this.currentRegionCachedRatio = value;
+    return this;
+  }
+
   public RegionMetrics build() {
     return new RegionMetricsImpl(name, storeCount, storeFileCount, storeRefCount,
       maxCompactedStoreFileRefCount, compactingCellCount, compactedCellCount, storeFileSize,
@@ -296,7 +310,7 @@ public RegionMetrics build() {
       uncompressedStoreFileSize, writeRequestCount, readRequestCount, cpRequestCount,
       filteredReadRequestCount, completedSequenceId, storeSequenceIds, dataLocality,
       lastMajorCompactionTimestamp, dataLocalityForSsd, blocksLocalWeight, blocksLocalWithSsdWeight,
-      blocksTotalWeight, compactionState);
+      blocksTotalWeight, compactionState, regionSizeMB, currentRegionCachedRatio);
   }
 
   private static class RegionMetricsImpl implements RegionMetrics {
@@ -327,6 +341,8 @@ private static class RegionMetricsImpl implements RegionMetrics {
     private final long blocksLocalWithSsdWeight;
     private final long blocksTotalWeight;
     private final CompactionState compactionState;
+    private final Size regionSizeMB;
+    private final float currentRegionCachedRatio;
 
     RegionMetricsImpl(byte[] name, int storeCount, int storeFileCount, int storeRefCount,
       int maxCompactedStoreFileRefCount, final long compactingCellCount, long compactedCellCount,
@@ -336,7 +352,7 @@ private static class RegionMetricsImpl implements RegionMetrics {
       long filteredReadRequestCount, long completedSequenceId, Map<byte[], Long> storeSequenceIds,
       float dataLocality, long lastMajorCompactionTimestamp, float dataLocalityForSsd,
       long blocksLocalWeight, long blocksLocalWithSsdWeight, long blocksTotalWeight,
-      CompactionState compactionState) {
+      CompactionState compactionState, Size regionSizeMB, float currentRegionCachedRatio) {
       this.name = Preconditions.checkNotNull(name);
       this.storeCount = storeCount;
       this.storeFileCount = storeFileCount;
@@ -364,6 +380,8 @@ private static class RegionMetricsImpl implements RegionMetrics {
       this.blocksLocalWithSsdWeight = blocksLocalWithSsdWeight;
       this.blocksTotalWeight = blocksTotalWeight;
       this.compactionState = compactionState;
+      this.regionSizeMB = regionSizeMB;
+      this.currentRegionCachedRatio = currentRegionCachedRatio;
     }
 
     @Override
@@ -501,6 +519,16 @@ public CompactionState getCompactionState() {
       return compactionState;
     }
 
+    @Override
+    public Size getRegionSizeMB() {
+      return regionSizeMB;
+    }
+
+    @Override
+    public float getCurrentRegionCachedRatio() {
+      return currentRegionCachedRatio;
+    }
+
     @Override
     public String toString() {
       StringBuilder sb =
@@ -541,6 +569,8 @@ public String toString() {
       Strings.appendKeyValue(sb, "blocksLocalWithSsdWeight", blocksLocalWithSsdWeight);
       Strings.appendKeyValue(sb, "blocksTotalWeight", blocksTotalWeight);
       Strings.appendKeyValue(sb, "compactionState", compactionState);
+      Strings.appendKeyValue(sb, "regionSizeMB", regionSizeMB);
+      Strings.appendKeyValue(sb, "currentRegionCachedRatio", currentRegionCachedRatio);
       return sb.toString();
     }
   }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java
index 2684886ba3d5..2cf55a1abdc0 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java
@@ -106,4 +106,10 @@ default String getVersion() {
 
   @Nullable
   List<ServerTask> getTasks();
+
+  /**
+   * Returns the region cache information for the regions hosted on this server
+   * @return map of region encoded name and the size of the region cached on this region server
+   *         rounded to MB
+   */
+  Map<String, Integer> getRegionCachedInfo();
 }
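Note: with the accessor above, per-region cache sizes become visible to any ClusterMetrics consumer. A minimal sketch of how a client might read them through the Admin API (not part of this patch; assumes a connected Admin instance):

    import java.util.EnumSet;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.client.Admin;

    public class RegionCacheReport {
      // Prints <server> <region encoded name> <cached MB> for every live server.
      static void dump(Admin admin) throws Exception {
        ClusterMetrics cm = admin.getClusterMetrics(EnumSet.of(ClusterMetrics.Option.LIVE_SERVERS));
        cm.getLiveServerMetrics().forEach((sn, sm) ->
          sm.getRegionCachedInfo().forEach((region, cachedMb) ->
            System.out.println(sn + " " + region + " " + cachedMb + " MB")));
      }
    }
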
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java
index 7a0312f22fdc..c7aea21e845a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java
@@ -85,6 +85,7 @@ public static ServerMetrics toServerMetrics(ServerName serverName, int versionNu
         : null)
       .setTasks(serverLoadPB.getTasksList().stream().map(ProtobufUtil::getServerTask)
         .collect(Collectors.toList()))
+      .setRegionCachedInfo(serverLoadPB.getRegionCachedInfoMap())
       .setReportTimestamp(serverLoadPB.getReportEndTime())
       .setLastReportTimestamp(serverLoadPB.getReportStartTime()).setVersionNumber(versionNumber)
       .setVersion(version).build();
@@ -111,6 +112,7 @@ public static ClusterStatusProtos.ServerLoad toServerLoad(ServerMetrics metrics)
         .map(ProtobufUtil::toReplicationLoadSource).collect(Collectors.toList()))
       .addAllTasks(
         metrics.getTasks().stream().map(ProtobufUtil::toServerTask).collect(Collectors.toList()))
+      .putAllRegionCachedInfo(metrics.getRegionCachedInfo())
       .setReportStartTime(metrics.getLastReportTimestamp())
       .setReportEndTime(metrics.getReportTimestamp());
     if (metrics.getReplicationLoadSink() != null) {
@@ -142,6 +144,7 @@ public static ServerMetricsBuilder newBuilder(ServerName sn) {
   private long reportTimestamp = EnvironmentEdgeManager.currentTime();
   private long lastReportTimestamp = 0;
   private final List<ServerTask> tasks = new ArrayList<>();
+  private Map<String, Integer> regionCachedInfo = new HashMap<>();
 
   private ServerMetricsBuilder(ServerName serverName) {
     this.serverName = serverName;
@@ -232,11 +235,16 @@ public ServerMetricsBuilder setTasks(List<ServerTask> tasks) {
     return this;
   }
 
+  public ServerMetricsBuilder setRegionCachedInfo(Map<String, Integer> value) {
+    this.regionCachedInfo = value;
+    return this;
+  }
+
   public ServerMetrics build() {
     return new ServerMetricsImpl(serverName, versionNumber, version, requestCountPerSecond,
       requestCount, readRequestCount, writeRequestCount, usedHeapSize, maxHeapSize, infoServerPort,
       sources, sink, regionStatus, coprocessorNames, reportTimestamp, lastReportTimestamp,
-      userMetrics, tasks);
+      userMetrics, tasks, regionCachedInfo);
   }
 
   private static class ServerMetricsImpl implements ServerMetrics {
@@ -259,13 +267,15 @@ private static class ServerMetricsImpl implements ServerMetrics {
     private final long lastReportTimestamp;
     private final Map<byte[], UserMetrics> userMetrics;
     private final List<ServerTask> tasks;
+    private final Map<String, Integer> regionCachedInfo;
 
     ServerMetricsImpl(ServerName serverName, int versionNumber, String version,
       long requestCountPerSecond, long requestCount, long readRequestsCount,
       long writeRequestsCount, Size usedHeapSize, Size maxHeapSize, int infoServerPort,
       List<ReplicationLoadSource> sources, ReplicationLoadSink sink,
       Map<byte[], RegionMetrics> regionStatus, Set<String> coprocessorNames, long reportTimestamp,
-      long lastReportTimestamp, Map<byte[], UserMetrics> userMetrics, List<ServerTask> tasks) {
+      long lastReportTimestamp, Map<byte[], UserMetrics> userMetrics, List<ServerTask> tasks,
+      Map<String, Integer> regionCachedInfo) {
       this.serverName = Preconditions.checkNotNull(serverName);
       this.versionNumber = versionNumber;
       this.version = version;
@@ -284,6 +294,7 @@ private static class ServerMetricsImpl implements ServerMetrics {
       this.reportTimestamp = reportTimestamp;
       this.lastReportTimestamp = lastReportTimestamp;
       this.tasks = tasks;
+      this.regionCachedInfo = regionCachedInfo;
     }
 
     @Override
@@ -386,6 +397,11 @@ public List<ServerTask> getTasks() {
       return tasks;
     }
 
+    @Override
+    public Map<String, Integer> getRegionCachedInfo() {
+      return Collections.unmodifiableMap(regionCachedInfo);
+    }
+
     @Override
     public String toString() {
       int storeCount = 0;
diff --git a/hbase-protocol-shaded/src/main/protobuf/PrefetchPersistence.proto b/hbase-protocol-shaded/src/main/protobuf/PrefetchPersistence.proto
deleted file mode 100644
index a024b94baa62..000000000000
--- a/hbase-protocol-shaded/src/main/protobuf/PrefetchPersistence.proto
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-syntax = "proto2";
-
-package hbase.pb;
-
-option java_package = "org.apache.hadoop.hbase.shaded.protobuf.generated";
-option java_outer_classname = "PersistentPrefetchProtos";
-option java_generic_services = true;
-option java_generate_equals_and_hash = true;
-option optimize_for = SPEED;
-
-
-message PrefetchedHfileName {
-  map<string, RegionFileSizeMap> prefetched_files = 1;
-}
-
-message RegionFileSizeMap {
-  required string region_name = 1;
-  required uint64 region_prefetch_size = 2;
-}
diff --git a/hbase-protocol-shaded/src/main/protobuf/server/ClusterStatus.proto b/hbase-protocol-shaded/src/main/protobuf/server/ClusterStatus.proto
index 28cc5a865c23..58fd3c8d2a5b 100644
--- a/hbase-protocol-shaded/src/main/protobuf/server/ClusterStatus.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/server/ClusterStatus.proto
@@ -177,6 +177,12 @@ message RegionLoad {
     MAJOR = 2;
     MAJOR_AND_MINOR = 3;
   }
+
+  /** Total region size in MB */
+  optional uint32 region_size_MB = 28;
+
+  /** Current region cache ratio on this server */
+  optional float current_region_cached_ratio = 29;
 }
 
 message UserLoad {
@@ -315,6 +321,11 @@ message ServerLoad {
    * The active monitored tasks
    */
   repeated ServerTask tasks = 15;
+
+  /**
+   * The metrics for region cached on this region server
+   */
+  map<string, int32> regionCachedInfo = 16;
 }
 
 message LiveServerInfo {
diff --git a/hbase-protocol-shaded/src/main/protobuf/server/io/BucketCacheEntry.proto b/hbase-protocol-shaded/src/main/protobuf/server/io/BucketCacheEntry.proto
index ae1980fe51e6..80fc10ada786 100644
--- a/hbase-protocol-shaded/src/main/protobuf/server/io/BucketCacheEntry.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/server/io/BucketCacheEntry.proto
@@ -32,7 +32,7 @@ message BucketCacheEntry {
   map<int32, string> deserializers = 4;
   required BackingMap backing_map = 5;
   optional bytes checksum = 6;
-  map<string, bool> prefetched_files = 7;
+  map<string, RegionFileSizeMap> cached_files = 7;
 }
 
 message BackingMap {
@@ -81,3 +81,9 @@ enum BlockPriority {
   multi = 1;
   memory = 2;
 }
+
+message RegionFileSizeMap {
+  required string region_name = 1;
+  required uint64 region_cached_size = 2;
+}
+
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
index e480c9b5789b..91ebaaabd422 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
@@ -20,6 +20,7 @@
 import java.util.Iterator;
 import java.util.Map;
 import java.util.Optional;
+import org.apache.hadoop.hbase.util.Pair;
 import org.apache.yetus.audience.InterfaceAudience;
 
 /**
@@ -167,7 +168,7 @@ default boolean isMetaBlock(BlockType blockType) {
   /**
    * Returns the list of fully cached files
    */
-  default Optional<Map<String, Boolean>> getFullyCachedFiles() {
+  default Optional<Map<String, Pair<String, Long>>> getFullyCachedFiles() {
     return Optional.empty();
   }
 }
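Note: the value type of getFullyCachedFiles() changes from a plain "is prefetched" flag to a Pair of (region encoded name, cached file size), mirroring the RegionFileSizeMap entry now embedded in BucketCacheEntry.proto. A sketch of what one persisted cached_files entry carries (assumes the regenerated shaded protos; the same builder calls appear in BucketProtoUtils further down):

    import org.apache.hadoop.hbase.shaded.protobuf.generated.BucketCacheProtos;

    public class CachedFilesEntryExample {
      // One cached_files map value: the file's region and its cached size in bytes.
      static BucketCacheProtos.RegionFileSizeMap entry(String regionEncodedName, long cachedBytes) {
        return BucketCacheProtos.RegionFileSizeMap.newBuilder()
          .setRegionName(regionEncodedName)
          .setRegionCachedSize(cachedBytes)
          .build();
      }
    }
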
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
index a421dfc83aa0..1e0fe7709292 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
@@ -22,6 +22,7 @@
 import java.util.Optional;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
+import org.apache.hadoop.hbase.util.Pair;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -431,7 +432,7 @@ public BlockCache[] getBlockCaches() {
    * Returns the list of fully cached files
    */
   @Override
-  public Optional<Map<String, Boolean>> getFullyCachedFiles() {
+  public Optional<Map<String, Pair<String, Long>>> getFullyCachedFiles() {
     return this.l2Cache.getFullyCachedFiles();
   }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java
index 2079dcafb65f..7cdbd5aff486 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java
@@ -115,7 +115,8 @@ public void run() {
             block.release();
           }
         }
-        bucketCacheOptional.ifPresent(bc -> bc.fileCacheCompleted(path.getName()));
+        final long fileSize = offset;
+        bucketCacheOptional.ifPresent(bc -> bc.fileCacheCompleted(path, fileSize));
       } catch (IOException e) {
         // IOExceptions are probably due to region closes (relocation, etc.)
         if (LOG.isTraceEnabled()) {
@@ -132,7 +133,6 @@ public void run() {
             LOG.warn("Close prefetch stream reader failed, path: " + path, e);
           }
         }
-        String regionName = getRegionName(path);
         PrefetchExecutor.complete(path);
       }
     }
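Note: the prefetch runner now hands the bucket cache the hfile Path (instead of just the file name) plus the byte count it read, so the cache can attribute the file to its region. The region's encoded name is recovered from the store file layout, roughly as follows (a sketch; assumes the usual layout .../<table>/<region>/<family>/<hfile>):

    import org.apache.hadoop.fs.Path;

    public class RegionNameFromPath {
      // e.g. /hbase/data/default/t1/2fd0a0a6e91c.../cf/51eb1baf... -> 2fd0a0a6e91c...
      static String regionEncodedName(Path hfilePath) {
        return hfilePath.getParent().getParent().getName();
      }
    }
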
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchProtoUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchProtoUtils.java
deleted file mode 100644
index df67e4429a2d..000000000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchProtoUtils.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.io.hfile;
-
-import java.util.HashMap;
-import java.util.Map;
-import org.apache.hadoop.hbase.util.Pair;
-
-import org.apache.hadoop.hbase.shaded.protobuf.generated.PersistentPrefetchProtos;
-
-final class PrefetchProtoUtils {
-  private PrefetchProtoUtils() {
-  }
-
-  static PersistentPrefetchProtos.PrefetchedHfileName
-    toPB(Map<String, Pair<String, Long>> prefetchedHfileNames) {
-    Map<String, PersistentPrefetchProtos.RegionFileSizeMap> tmpMap = new HashMap<>();
-    prefetchedHfileNames.forEach((hFileName, regionPrefetchMap) -> {
-      PersistentPrefetchProtos.RegionFileSizeMap tmpRegionFileSize =
-        PersistentPrefetchProtos.RegionFileSizeMap.newBuilder()
-          .setRegionName(regionPrefetchMap.getFirst())
-          .setRegionPrefetchSize(regionPrefetchMap.getSecond()).build();
-      tmpMap.put(hFileName, tmpRegionFileSize);
-    });
-    return PersistentPrefetchProtos.PrefetchedHfileName.newBuilder().putAllPrefetchedFiles(tmpMap)
-      .build();
-  }
-
-  static Map<String, Pair<String, Long>>
-    fromPB(Map<String, PersistentPrefetchProtos.RegionFileSizeMap> prefetchHFileNames) {
-    Map<String, Pair<String, Long>> hFileMap = new HashMap<>();
-    prefetchHFileNames.forEach((hFileName, regionPrefetchMap) -> {
-      hFileMap.put(hFileName,
-        new Pair<>(regionPrefetchMap.getRegionName(), regionPrefetchMap.getRegionPrefetchSize()));
-    });
-    return hFileMap;
-  }
-}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index e3d740383085..ca7750c92c56 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -25,6 +25,7 @@
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.Comparator;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -51,6 +52,7 @@
 import java.util.function.Consumer;
 import java.util.function.Function;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.TableName;
@@ -79,6 +81,7 @@
 import org.apache.hadoop.hbase.util.IdReadWriteLockStrongRef;
 import org.apache.hadoop.hbase.util.IdReadWriteLockWithObjectPool;
 import org.apache.hadoop.hbase.util.IdReadWriteLockWithObjectPool.ReferenceType;
+import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -151,8 +154,17 @@ public class BucketCache implements BlockCache, HeapSize {
 
   private AtomicBoolean backingMapValidated = new AtomicBoolean(false);
 
-  /** Set of files for which prefetch is completed */
-  final Map<String, Boolean> fullyCachedFiles = new ConcurrentHashMap<>();
+  /**
+   * Map of hFile -> Region -> File size. This map is used to track all files completed prefetch,
+   * together with the region those belong to and the total cached size for the
+   * region.
+   */
+  final Map<String, Pair<String, Long>> fullyCachedFiles = new ConcurrentHashMap<>();
+  /**
+   * Map of region -> total size of the region prefetched on this region server. This is the total
+   * size of hFiles for this region prefetched on this region server.
+   */
+  final Map<String, Long> regionCachedSizeMap = new ConcurrentHashMap<>();
 
   private BucketCachePersister cachePersister;
 
@@ -546,7 +558,6 @@ protected void cacheBlockWithWaitInternal(BlockCacheKey cacheKey, Cacheable cach
       } else {
         this.blockNumber.increment();
         this.heapSize.add(cachedItem.heapSize());
-        blocksByHFile.add(cacheKey);
       }
     }
 
@@ -636,15 +647,11 @@ void blockEvicted(BlockCacheKey cacheKey, BucketEntry bucketEntry, boolean decre
       cacheStats.evicted(bucketEntry.getCachedTime(), cacheKey.isPrimary());
     }
     if (ioEngine.isPersistent()) {
-      fullyCachedFiles.remove(cacheKey.getHfileName());
+      removeFileFromPrefetch(cacheKey.getHfileName());
       setCacheInconsistent(true);
     }
   }
 
-  public void fileCacheCompleted(String fileName) {
-    fullyCachedFiles.put(fileName, true);
-  }
-
   /**
    * Free the {{@link BucketEntry} actually,which could only be invoked when the
    * {@link BucketEntry#refCnt} becoming 0.
@@ -1300,6 +1307,10 @@ public boolean isCachePersistent() {
     return ioEngine.isPersistent() && persistencePath != null;
   }
 
+  public Map<String, Long> getRegionCachedInfo() {
+    return Collections.unmodifiableMap(regionCachedSizeMap);
+  }
+
   /**
    * @see #persistToFile()
    */
@@ -1337,6 +1348,29 @@ private void retrieveFromFile(int[] bucketSizes) throws IOException {
     }
   }
 
+  private void updateRegionSizeMapWhileRetrievingFromFile() {
+    // Update the regionCachedSizeMap with the region size while restarting the region server
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Updating region size map after retrieving cached file list");
+      dumpPrefetchList();
+    }
+    regionCachedSizeMap.clear();
+    fullyCachedFiles.forEach((hFileName, hFileSize) -> {
+      // Get the region name for each file
+      String regionEncodedName = hFileSize.getFirst();
+      long cachedFileSize = hFileSize.getSecond();
+      regionCachedSizeMap.merge(regionEncodedName, cachedFileSize,
+        (oldpf, fileSize) -> oldpf + fileSize);
+    });
+  }
+
+  private void dumpPrefetchList() {
+    for (Map.Entry<String, Pair<String, Long>> outerEntry : fullyCachedFiles.entrySet()) {
+      LOG.debug("Cached File Entry:<{},<{},{}>>", outerEntry.getKey(),
+        outerEntry.getValue().getFirst(), outerEntry.getValue().getSecond());
+    }
+  }
+
   /**
    * Create an input stream that deletes the file after reading it. Use in try-with-resources to
    * avoid this pattern where an exception thrown from a finally block may mask earlier exceptions:
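Note: regionCachedSizeMap is derived state; after the persisted file list is reloaded, updateRegionSizeMapWhileRetrievingFromFile() above rebuilds it by summing per-file sizes into per-region totals. The core of that aggregation, reduced to a sketch:

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.hbase.util.Pair;

    public class RegionSizeAggregation {
      // hfile name -> (region encoded name, cached bytes)  ==>  region -> total cached bytes
      static Map<String, Long> aggregate(Map<String, Pair<String, Long>> fullyCachedFiles) {
        Map<String, Long> regionCachedSize = new HashMap<>();
        fullyCachedFiles.forEach((hfile, regionAndSize) -> regionCachedSize
          .merge(regionAndSize.getFirst(), regionAndSize.getSecond(), Long::sum));
        return regionCachedSize;
      }
    }
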
@@ -1401,7 +1435,7 @@ private void parsePB(BucketCacheProtos.BucketCacheEntry proto) throws IOExceptio
       backingMap = BucketProtoUtils.fromPB(proto.getDeserializersMap(), proto.getBackingMap(),
         this::createRecycler);
       fullyCachedFiles.clear();
-      fullyCachedFiles.putAll(proto.getPrefetchedFilesMap());
+      fullyCachedFiles.putAll(BucketProtoUtils.fromPB(proto.getCachedFilesMap()));
       if (proto.hasChecksum()) {
         try {
           ((PersistentIOEngine) ioEngine).verifyFileIntegrity(proto.getChecksum().toByteArray(),
@@ -1444,6 +1478,7 @@ private void parsePB(BucketCacheProtos.BucketCacheEntry proto) throws IOExceptio
         LOG.info("Persistent file is old format, it does not support verifying file integrity!");
         backingMapValidated.set(true);
       }
+      updateRegionSizeMapWhileRetrievingFromFile();
       verifyCapacityAndClasses(proto.getCacheCapacity(), proto.getIoClass(), proto.getMapClass());
     }
 
@@ -1581,7 +1616,7 @@ protected String getAlgorithm() {
    */
   @Override
   public int evictBlocksByHfileName(String hfileName) {
-    this.fullyCachedFiles.remove(hfileName);
+    removeFileFromPrefetch(hfileName);
     Set<BlockCacheKey> keySet = blocksByHFile.subSet(new BlockCacheKey(hfileName, Long.MIN_VALUE),
       true, new BlockCacheKey(hfileName, Long.MAX_VALUE), true);
@@ -1966,7 +2001,7 @@ public AtomicBoolean getBackingMapValidated() {
   }
 
   @Override
-  public Optional<Map<String, Boolean>> getFullyCachedFiles() {
+  public Optional<Map<String, Pair<String, Long>>> getFullyCachedFiles() {
     return Optional.of(fullyCachedFiles);
   }
 
@@ -1985,4 +2020,33 @@ public static Optional<BucketCache> getBucketCacheFromCacheConfig(CacheConfig ca
     return Optional.empty();
   }
 
+  private void removeFileFromPrefetch(String hfileName) {
+    // Update the regionPrefetchedSizeMap before removing the file from prefetchCompleted
+    if (fullyCachedFiles.containsKey(hfileName)) {
+      Pair<String, Long> regionEntry = fullyCachedFiles.get(hfileName);
+      String regionEncodedName = regionEntry.getFirst();
+      long filePrefetchSize = regionEntry.getSecond();
+      LOG.debug("Removing file {} for region {}", hfileName, regionEncodedName);
+      regionCachedSizeMap.computeIfPresent(regionEncodedName, (rn, pf) -> pf - filePrefetchSize);
+      // If all the blocks for a region are evicted from the cache, remove the entry for that region
+      if (
+        regionCachedSizeMap.containsKey(regionEncodedName)
+          && regionCachedSizeMap.get(regionEncodedName) == 0
+      ) {
+        regionCachedSizeMap.remove(regionEncodedName);
+      }
+    }
+    fullyCachedFiles.remove(hfileName);
+  }
+
+  public void fileCacheCompleted(Path filePath, long size) {
+    Pair<String, Long> pair = new Pair<>();
+    // sets the region name
+    String regionName = filePath.getParent().getParent().getName();
+    pair.setFirst(regionName);
+    pair.setSecond(size);
+    fullyCachedFiles.put(filePath.getName(), pair);
+    regionCachedSizeMap.merge(regionName, size, (oldpf, fileSize) -> oldpf + fileSize);
+  }
+
 }
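Note: removeFileFromPrefetch() above keeps the two maps consistent on eviction: subtract the evicted file's size from its region total and drop the region entry once it reaches zero. A compacted variant of the same bookkeeping (a sketch, not the patch's code; returning null from computeIfPresent removes the mapping atomically):

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import org.apache.hadoop.hbase.util.Pair;

    public class EvictionBookkeeping {
      final Map<String, Pair<String, Long>> fullyCachedFiles = new ConcurrentHashMap<>();
      final Map<String, Long> regionCachedSizeMap = new ConcurrentHashMap<>();

      void onFileEvicted(String hfileName) {
        Pair<String, Long> entry = fullyCachedFiles.remove(hfileName);
        if (entry != null) {
          // Subtract the file size; drop the region entry when nothing remains cached.
          regionCachedSizeMap.computeIfPresent(entry.getFirst(),
            (region, total) -> total - entry.getSecond() <= 0 ? null : total - entry.getSecond());
        }
      }
    }
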
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java
index 8830e5d3255a..7cc5050506e4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.io.hfile.bucket;
 
 import java.io.IOException;
+import java.util.HashMap;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.function.Function;
@@ -28,6 +29,7 @@
 import org.apache.hadoop.hbase.io.hfile.BlockType;
 import org.apache.hadoop.hbase.io.hfile.CacheableDeserializerIdManager;
 import org.apache.hadoop.hbase.io.hfile.HFileBlock;
+import org.apache.hadoop.hbase.util.Pair;
 import org.apache.yetus.audience.InterfaceAudience;
 
 import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;
@@ -45,7 +47,7 @@ static BucketCacheProtos.BucketCacheEntry toPB(BucketCache cache) {
       .setIoClass(cache.ioEngine.getClass().getName())
       .setMapClass(cache.backingMap.getClass().getName())
       .putAllDeserializers(CacheableDeserializerIdManager.save())
-      .putAllPrefetchedFiles(cache.fullyCachedFiles)
+      .putAllCachedFiles(toCachedPB(cache.fullyCachedFiles))
       .setBackingMap(BucketProtoUtils.toPB(cache.backingMap))
       .setChecksum(ByteString
         .copyFrom(((PersistentIOEngine) cache.ioEngine).calculateChecksum(cache.getAlgorithm())))
@@ -185,4 +187,26 @@ private static BlockType fromPb(BucketCacheProtos.BlockType blockType) {
       throw new Error("Unrecognized BlockType.");
     }
   }
+
+  static Map<String, BucketCacheProtos.RegionFileSizeMap>
+    toCachedPB(Map<String, Pair<String, Long>> prefetchedHfileNames) {
+    Map<String, BucketCacheProtos.RegionFileSizeMap> tmpMap = new HashMap<>();
+    prefetchedHfileNames.forEach((hfileName, regionPrefetchMap) -> {
+      BucketCacheProtos.RegionFileSizeMap tmpRegionFileSize =
+        BucketCacheProtos.RegionFileSizeMap.newBuilder().setRegionName(regionPrefetchMap.getFirst())
+          .setRegionCachedSize(regionPrefetchMap.getSecond()).build();
+      tmpMap.put(hfileName, tmpRegionFileSize);
+    });
+    return tmpMap;
+  }
+
+  static Map<String, Pair<String, Long>>
+    fromPB(Map<String, BucketCacheProtos.RegionFileSizeMap> prefetchHFileNames) {
+    Map<String, Pair<String, Long>> hfileMap = new HashMap<>();
+    prefetchHFileNames.forEach((hfileName, regionPrefetchMap) -> {
+      hfileMap.put(hfileName,
+        new Pair<>(regionPrefetchMap.getRegionName(), regionPrefetchMap.getRegionCachedSize()));
+    });
+    return hfileMap;
+  }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 85721a354977..3042a2eae451 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -65,10 +65,12 @@
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.function.Consumer;
 import java.util.stream.Collectors;
 import javax.management.MalformedObjectNameException;
 import javax.servlet.http.HttpServlet;
 import org.apache.commons.lang3.StringUtils;
+import org.apache.commons.lang3.mutable.MutableFloat;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -108,7 +110,9 @@
 import org.apache.hadoop.hbase.http.InfoServer;
 import org.apache.hadoop.hbase.io.hfile.BlockCache;
 import org.apache.hadoop.hbase.io.hfile.BlockCacheFactory;
+import org.apache.hadoop.hbase.io.hfile.CombinedBlockCache;
 import org.apache.hadoop.hbase.io.hfile.HFile;
+import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
 import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
 import org.apache.hadoop.hbase.ipc.RpcClient;
@@ -239,6 +243,9 @@ public class HRegionServer extends HBaseServerBase
 
   private static final Logger LOG = LoggerFactory.getLogger(HRegionServer.class);
 
+  int unitMB = 1024 * 1024;
+  int unitKB = 1024;
+
   /**
    * For testing only! Set to true to skip notifying region assignment to master.
    */
@@ -1211,6 +1218,11 @@ private ClusterStatusProtos.ServerLoad buildServerLoad(long reportStartTime, lon
         serverLoad.addCoprocessors(coprocessorBuilder.setName(coprocessor).build());
       }
     }
+    computeIfPersistentBucketCache(bc -> {
+      bc.getRegionCachedInfo().forEach((regionName, prefetchSize) -> {
+        serverLoad.putRegionCachedInfo(regionName, roundSize(prefetchSize, unitMB));
+      });
+    });
     serverLoad.setReportStartTime(reportStartTime);
     serverLoad.setReportEndTime(reportEndTime);
     if (this.infoServer != null) {
@@ -1510,6 +1522,15 @@ private static int roundSize(long sizeInByte, int sizeUnit) {
     }
   }
 
+  private void computeIfPersistentBucketCache(Consumer<BucketCache> computation) {
+    if (blockCache instanceof CombinedBlockCache) {
+      BlockCache l2 = ((CombinedBlockCache) blockCache).getSecondLevelCache();
+      if (l2 instanceof BucketCache && ((BucketCache) l2).isCachePersistent()) {
+        computation.accept((BucketCache) l2);
+      }
+    }
+  }
+
   /**
    * @param r Region to get RegionLoad for.
    * @param regionLoadBldr the RegionLoad.Builder, can be null
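Note: the ratio reported below divides the region's cached size by its total hfile size, both rounded to MB first; a region with 512 MB of hfiles and 128 MB cached reports 128 / 512 = 0.25f. The guard for empty regions, as a sketch:

    public class CachedRatio {
      // Mirrors the computation in createRegionLoad: 0 for empty regions,
      // otherwise cachedMB / regionSizeMB as a float.
      static float cachedRatio(int regionSizeMB, int cachedSizeMB) {
        return regionSizeMB == 0 ? 0.0f : (float) cachedSizeMB / regionSizeMB;
      }
    }
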
@@ -1519,6 +1540,7 @@ private static int roundSize(long sizeInByte, int sizeUnit) {
   RegionLoad createRegionLoad(final HRegion r, RegionLoad.Builder regionLoadBldr,
     RegionSpecifier.Builder regionSpecifier) throws IOException {
     byte[] name = r.getRegionInfo().getRegionName();
+    String regionEncodedName = r.getRegionInfo().getEncodedName();
     int stores = 0;
     int storefiles = 0;
     int storeRefCount = 0;
@@ -1531,6 +1553,7 @@ RegionLoad createRegionLoad(final HRegion r, RegionLoad.Builder regionLoadBldr,
     long totalStaticBloomSize = 0L;
     long totalCompactingKVs = 0L;
     long currentCompactedKVs = 0L;
+    long totalRegionSize = 0L;
     List<HStore> storeList = r.getStores();
     stores += storeList.size();
     for (HStore store : storeList) {
@@ -1542,6 +1565,7 @@ RegionLoad createRegionLoad(final HRegion r, RegionLoad.Builder regionLoadBldr,
         Math.max(maxCompactedStoreFileRefCount, currentMaxCompactedStoreFileRefCount);
       storeUncompressedSize += store.getStoreSizeUncompressed();
       storefileSize += store.getStorefilesSize();
+      totalRegionSize += store.getHFilesSize();
       // TODO: storefileIndexSizeKB is same with rootLevelIndexSizeKB?
       storefileIndexSize += store.getStorefilesRootLevelIndexSize();
       CompactionProgress progress = store.getCompactionProgress();
@@ -1554,9 +1578,6 @@ RegionLoad createRegionLoad(final HRegion r, RegionLoad.Builder regionLoadBldr,
       totalStaticBloomSize += store.getTotalStaticBloomSize();
     }
 
-    int unitMB = 1024 * 1024;
-    int unitKB = 1024;
-
     int memstoreSizeMB = roundSize(r.getMemStoreDataSize(), unitMB);
     int storeUncompressedSizeMB = roundSize(storeUncompressedSize, unitMB);
     int storefileSizeMB = roundSize(storefileSize, unitMB);
@@ -1564,6 +1585,16 @@ RegionLoad createRegionLoad(final HRegion r, RegionLoad.Builder regionLoadBldr,
     int rootLevelIndexSizeKB = roundSize(rootLevelIndexSize, unitKB);
     int totalStaticIndexSizeKB = roundSize(totalStaticIndexSize, unitKB);
     int totalStaticBloomSizeKB = roundSize(totalStaticBloomSize, unitKB);
+    int regionSizeMB = roundSize(totalRegionSize, unitMB);
+    final MutableFloat currentRegionCachedRatio = new MutableFloat(0.0f);
+    computeIfPersistentBucketCache(bc -> {
+      if (bc.getRegionCachedInfo().containsKey(regionEncodedName)) {
+        currentRegionCachedRatio.setValue(regionSizeMB == 0
+          ? 0.0f
+          : (float) roundSize(bc.getRegionCachedInfo().get(regionEncodedName), unitMB)
+            / regionSizeMB);
+      }
+    });
 
     HDFSBlocksDistribution hdfsBd = r.getHDFSBlocksDistribution();
     float dataLocality = hdfsBd.getBlockLocalityIndex(serverName.getHostname());
@@ -1594,7 +1625,8 @@ RegionLoad createRegionLoad(final HRegion r, RegionLoad.Builder regionLoadBldr,
       .setDataLocalityForSsd(dataLocalityForSsd).setBlocksLocalWeight(blocksLocalWeight)
       .setBlocksLocalWithSsdWeight(blocksLocalWithSsdWeight).setBlocksTotalWeight(blocksTotalWeight)
       .setCompactionState(ProtobufUtil.createCompactionStateForRegionLoad(r.getCompactionState()))
-      .setLastMajorCompactionTs(r.getOldestHfileTs(true));
+      .setLastMajorCompactionTs(r.getOldestHfileTs(true)).setRegionSizeMB(regionSizeMB)
+      .setCurrentRegionCachedRatio(currentRegionCachedRatio.floatValue());
     r.setCompleteSequenceId(regionLoadBldr);
     return regionLoadBldr.build();
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerMetrics.java
index 8bcf3e600f88..8dfb8b1a4632 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerMetrics.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerMetrics.java
@@ -60,6 +60,10 @@ public void testRegionLoadAggregation() {
       metrics.getRegionMetrics().values().stream().mapToLong(v -> v.getCpRequestCount()).sum());
     assertEquals(300, metrics.getRegionMetrics().values().stream()
       .mapToLong(v -> v.getFilteredReadRequestCount()).sum());
+    assertEquals(2, metrics.getRegionMetrics().values().stream()
+      .mapToLong(v -> (long) v.getCurrentRegionCachedRatio()).count());
+    assertEquals(150, metrics.getRegionMetrics().values().stream()
+      .mapToDouble(v -> v.getRegionSizeMB().get(Size.Unit.MEGABYTE)).sum(), 0);
   }
 
   @Test
@@ -99,12 +103,14 @@ private ClusterStatusProtos.ServerLoad createServerLoadProto() {
       ClusterStatusProtos.RegionLoad.newBuilder().setRegionSpecifier(rSpecOne).setStores(10)
         .setStorefiles(101).setStoreUncompressedSizeMB(106).setStorefileSizeMB(520)
         .setFilteredReadRequestsCount(100).setStorefileIndexSizeKB(42).setRootIndexSizeKB(201)
-        .setReadRequestsCount(Integer.MAX_VALUE).setWriteRequestsCount(Integer.MAX_VALUE).build();
-    ClusterStatusProtos.RegionLoad rlTwo = ClusterStatusProtos.RegionLoad.newBuilder()
-      .setRegionSpecifier(rSpecTwo).setStores(3).setStorefiles(13).setStoreUncompressedSizeMB(23)
-      .setStorefileSizeMB(300).setFilteredReadRequestsCount(200).setStorefileIndexSizeKB(40)
-      .setRootIndexSizeKB(303).setReadRequestsCount(Integer.MAX_VALUE)
-      .setWriteRequestsCount(Integer.MAX_VALUE).setCpRequestsCount(100).build();
+        .setReadRequestsCount(Integer.MAX_VALUE).setWriteRequestsCount(Integer.MAX_VALUE)
+        .setRegionSizeMB(100).setCurrentRegionCachedRatio(0.9f).build();
+    ClusterStatusProtos.RegionLoad rlTwo =
+      ClusterStatusProtos.RegionLoad.newBuilder().setRegionSpecifier(rSpecTwo).setStores(3)
+        .setStorefiles(13).setStoreUncompressedSizeMB(23).setStorefileSizeMB(300)
+        .setFilteredReadRequestsCount(200).setStorefileIndexSizeKB(40).setRootIndexSizeKB(303)
+        .setReadRequestsCount(Integer.MAX_VALUE).setWriteRequestsCount(Integer.MAX_VALUE)
+        .setCpRequestsCount(100).setRegionSizeMB(50).setCurrentRegionCachedRatio(1.0f).build();
 
     ClusterStatusProtos.ServerLoad sl = ClusterStatusProtos.ServerLoad.newBuilder()
       .addRegionLoads(rlOne).addRegionLoads(rlTwo).build();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryChore.java
index 594db4d7c303..31fcf9fd47f5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryChore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryChore.java
@@ -400,6 +400,10 @@ public List<ServerTask> getTasks() {
         return null;
       }
 
+      @Override
+      public Map<String, Integer> getRegionCachedInfo() {
+        return new HashMap<>();
+      }
     };
     return serverMetrics;
   }
@@ -541,6 +545,16 @@ public long getBlocksTotalWeight() {
       public CompactionState getCompactionState() {
         return null;
       }
+
+      @Override
+      public Size getRegionSizeMB() {
+        return null;
+      }
+
+      @Override
+      public float getCurrentRegionCachedRatio() {
+        return 0.0f;
+      }
     };
     return regionMetrics;
   }
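
Note: taken together, a balancer or operator tool can combine the two new RegionMetrics fields to spot large, poorly cached regions. A consumer-side sketch (not part of this patch):

    import org.apache.hadoop.hbase.RegionMetrics;
    import org.apache.hadoop.hbase.ServerMetrics;
    import org.apache.hadoop.hbase.Size;

    public class ColdRegionFinder {
      // Flags regions whose hfiles are mostly not cached on their current server.
      static void report(ServerMetrics sm) {
        for (RegionMetrics rm : sm.getRegionMetrics().values()) {
          double sizeMb = rm.getRegionSizeMB().get(Size.Unit.MEGABYTE);
          float ratio = rm.getCurrentRegionCachedRatio();
          if (sizeMb > 0 && ratio < 0.5f) {
            System.out.println(rm.getNameAsString() + ": " + sizeMb + " MB, ratio=" + ratio);
          }
        }
      }
    }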