Skip to content

Commit ee8260e

Browse files
jhungund authored and wchevreuil committed
HBASE-28467: Add time-based priority caching checks for cacheOnRead code paths. (#5905)
Signed-off-by: Wellington Chevreuil <[email protected]>
1 parent e978490 commit ee8260e

File tree

3 files changed

+80
-2
lines changed

3 files changed

+80
-2
lines changed

hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -279,6 +279,18 @@ public boolean shouldCacheBlockOnRead(BlockCategory category) {
279279
|| (prefetchOnOpen && (category != BlockCategory.META && category != BlockCategory.UNKNOWN));
280280
}
281281

282+
public boolean shouldCacheBlockOnRead(BlockCategory category, HFileInfo hFileInfo,
283+
Configuration conf) {
284+
Optional<Boolean> cacheFileBlock = Optional.of(true);
285+
if (getBlockCache().isPresent()) {
286+
Optional<Boolean> result = getBlockCache().get().shouldCacheFile(hFileInfo, conf);
287+
if (result.isPresent()) {
288+
cacheFileBlock = result;
289+
}
290+
}
291+
return shouldCacheBlockOnRead(category) && cacheFileBlock.get();
292+
}
293+
282294
/** Returns true if blocks in this file should be flagged as in-memory */
283295
public boolean isInMemory() {
284296
return this.inMemory;

hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1223,7 +1223,8 @@ public HFileBlock getMetaBlock(String metaBlockName, boolean cacheBlock) throws
12231223
BlockCacheKey cacheKey =
12241224
new BlockCacheKey(name, metaBlockOffset, this.isPrimaryReplicaReader(), BlockType.META);
12251225

1226-
cacheBlock &= cacheConf.shouldCacheBlockOnRead(BlockType.META.getCategory());
1226+
cacheBlock &=
1227+
cacheConf.shouldCacheBlockOnRead(BlockType.META.getCategory(), getHFileInfo(), conf);
12271228
HFileBlock cachedBlock =
12281229
getCachedBlock(cacheKey, cacheBlock, false, true, BlockType.META, null);
12291230
if (cachedBlock != null) {
@@ -1378,7 +1379,8 @@ public HFileBlock readBlock(long dataBlockOffset, long onDiskBlockSize, final bo
13781379
}
13791380
BlockType.BlockCategory category = hfileBlock.getBlockType().getCategory();
13801381
final boolean cacheCompressed = cacheConf.shouldCacheCompressed(category);
1381-
final boolean cacheOnRead = cacheConf.shouldCacheBlockOnRead(category);
1382+
final boolean cacheOnRead =
1383+
cacheConf.shouldCacheBlockOnRead(category, getHFileInfo(), conf);
13821384

13831385
// Don't need the unpacked block back and we're storing the block in the cache compressed
13841386
if (cacheOnly && cacheCompressed && cacheOnRead) {

hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDataTieringManager.java

Lines changed: 64 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@
2121
import static org.apache.hadoop.hbase.io.hfile.bucket.BucketCache.DEFAULT_ERROR_TOLERATION_DURATION;
2222
import static org.junit.Assert.assertEquals;
2323
import static org.junit.Assert.assertFalse;
24+
import static org.junit.Assert.assertNotNull;
2425
import static org.junit.Assert.assertNull;
2526
import static org.junit.Assert.assertTrue;
2627
import static org.junit.Assert.fail;
@@ -50,12 +51,15 @@
5051
import org.apache.hadoop.hbase.client.TableDescriptor;
5152
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
5253
import org.apache.hadoop.hbase.fs.HFileSystem;
54+
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
5355
import org.apache.hadoop.hbase.io.hfile.BlockCache;
5456
import org.apache.hadoop.hbase.io.hfile.BlockCacheFactory;
5557
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
5658
import org.apache.hadoop.hbase.io.hfile.BlockType;
59+
import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory;
5760
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
5861
import org.apache.hadoop.hbase.io.hfile.CacheTestUtils;
62+
import org.apache.hadoop.hbase.io.hfile.HFileBlock;
5963
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
6064
import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
6165
import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker;
@@ -565,6 +569,66 @@ public void testFeatureKeyDisabled() throws Exception {
565569
}
566570
}
567571

572+
@Test
573+
public void testCacheConfigShouldCacheFile() throws Exception {
574+
// Evict the files from cache.
575+
for (HStoreFile file : hStoreFiles) {
576+
file.closeStoreFile(true);
577+
}
578+
// Verify that the API shouldCacheFileBlock returns the result correctly.
579+
// hStoreFiles[0], hStoreFiles[1], hStoreFiles[2] are hot files.
580+
// hStoreFiles[3] is a cold file.
581+
try {
582+
assertTrue(cacheConf.shouldCacheBlockOnRead(BlockCategory.DATA,
583+
hStoreFiles.get(0).getFileInfo().getHFileInfo(),
584+
hStoreFiles.get(0).getFileInfo().getConf()));
585+
assertTrue(cacheConf.shouldCacheBlockOnRead(BlockCategory.DATA,
586+
hStoreFiles.get(1).getFileInfo().getHFileInfo(),
587+
hStoreFiles.get(1).getFileInfo().getConf()));
588+
assertTrue(cacheConf.shouldCacheBlockOnRead(BlockCategory.DATA,
589+
hStoreFiles.get(2).getFileInfo().getHFileInfo(),
590+
hStoreFiles.get(2).getFileInfo().getConf()));
591+
assertFalse(cacheConf.shouldCacheBlockOnRead(BlockCategory.DATA,
592+
hStoreFiles.get(3).getFileInfo().getHFileInfo(),
593+
hStoreFiles.get(3).getFileInfo().getConf()));
594+
} finally {
595+
for (HStoreFile file : hStoreFiles) {
596+
file.initReader();
597+
}
598+
}
599+
}
600+
601+
@Test
602+
public void testCacheOnReadColdFile() throws Exception {
603+
// hStoreFiles[3] is a cold file. the blocks should not get loaded after a readBlock call.
604+
HStoreFile hStoreFile = hStoreFiles.get(3);
605+
BlockCacheKey cacheKey = new BlockCacheKey(hStoreFile.getPath(), 0, true, BlockType.DATA);
606+
testCacheOnRead(hStoreFile, cacheKey, 23025, false);
607+
}
608+
609+
@Test
610+
public void testCacheOnReadHotFile() throws Exception {
611+
// hStoreFiles[0] is a hot file. the blocks should get loaded after a readBlock call.
612+
HStoreFile hStoreFile = hStoreFiles.get(0);
613+
BlockCacheKey cacheKey =
614+
new BlockCacheKey(hStoreFiles.get(0).getPath(), 0, true, BlockType.DATA);
615+
testCacheOnRead(hStoreFile, cacheKey, 23025, true);
616+
}
617+
618+
private void testCacheOnRead(HStoreFile hStoreFile, BlockCacheKey key, long onDiskBlockSize,
619+
boolean expectedCached) throws Exception {
620+
// Execute the read block API which will try to cache the block if the block is a hot block.
621+
hStoreFile.getReader().getHFileReader().readBlock(key.getOffset(), onDiskBlockSize, true, false,
622+
false, false, key.getBlockType(), DataBlockEncoding.NONE);
623+
// Validate that the hot block gets cached and cold block is not cached.
624+
HFileBlock block = (HFileBlock) blockCache.getBlock(key, false, false, false, BlockType.DATA);
625+
if (expectedCached) {
626+
assertNotNull(block);
627+
} else {
628+
assertNull(block);
629+
}
630+
}
631+
568632
private void validateBlocks(Set<BlockCacheKey> keys, int expectedTotalKeys, int expectedHotBlocks,
569633
int expectedColdBlocks) {
570634
int numHotBlocks = 0, numColdBlocks = 0;

0 commit comments

Comments (0)