| 18 | 18 | package org.apache.hadoop.hbase.regionserver; | 
| 19 | 19 | 
| 20 | 20 | import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_SIZE_KEY; | 
|  | 21 | +import static org.apache.hadoop.hbase.io.hfile.bucket.BucketCache.DEFAULT_ERROR_TOLERATION_DURATION; | 
| 21 | 22 | import static org.junit.Assert.assertEquals; | 
| 22 | 23 | import static org.junit.Assert.assertTrue; | 
| 23 | 24 | import static org.junit.Assert.fail; | 
| 51 | 52 | import org.apache.hadoop.hbase.io.hfile.BlockCacheKey; | 
| 52 | 53 | import org.apache.hadoop.hbase.io.hfile.BlockType; | 
| 53 | 54 | import org.apache.hadoop.hbase.io.hfile.CacheConfig; | 
|  | 55 | +import org.apache.hadoop.hbase.io.hfile.CacheTestUtils; | 
| 54 | 56 | import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; | 
|  | 57 | +import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; | 
| 55 | 58 | import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker; | 
| 56 | 59 | import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; | 
| 57 | 60 | import org.apache.hadoop.hbase.testclassification.RegionServerTests; | 
| @@ -247,6 +250,181 @@ public void testColdDataFiles() { | 
| 247 | 250 |     } | 
| 248 | 251 |   } | 
| 249 | 252 | 
|  | 253 | +  @Test | 
|  | 254 | +  public void testPickColdDataFiles() { | 
|  | 255 | +    Map<String, String> coldDataFiles = dataTieringManager.getColdFilesList(); | 
|  | 256 | +    assertEquals(1, coldDataFiles.size()); | 
|  | 257 | +    // hStoreFiles[3] is the cold file. | 
|  | 258 | +    assertTrue(coldDataFiles.containsKey(hStoreFiles.get(3).getFileInfo().getActiveFileName())); | 
|  | 259 | +  } | 
|  | 260 | + | 
|  | 261 | +  /* | 
|  | 262 | +   * Verify that both cold blocks are evicted when the bucket cache reaches its capacity, | 
|  | 263 | +   * while the hot block remains in the cache. | 
|  | 264 | +   */ | 
|  | 265 | +  @Test | 
|  | 266 | +  public void testBlockEvictions() throws Exception { | 
|  | 267 | +    long capacitySize = 40 * 1024; | 
|  | 268 | +    int writeThreads = 3; | 
|  | 269 | +    int writerQLen = 64; | 
|  | 270 | +    int[] bucketSizes = new int[] { 8 * 1024 + 1024 }; | 
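|  |  | +    // A single bucket size of 9KB: each slot fits one 8KB block plus its overhead. | 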
|  | 271 | + | 
|  | 272 | +    // Setup: Create a bucket cache with lower capacity | 
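|  |  | +    // Constructor args: file-backed IO engine, capacity, block size, bucket sizes, writer | 
|  |  | +    // threads, writer queue length, persistence path, error toleration duration and conf. | 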
|  | 273 | +    BucketCache bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, | 
|  | 274 | +      8192, bucketSizes, writeThreads, writerQLen, testDir + "/bucket.persistence", | 
|  | 275 | +      DEFAULT_ERROR_TOLERATION_DURATION, defaultConf); | 
|  | 276 | + | 
|  | 277 | +    // Create two cache keys for cold data blocks and one for a hot data block. | 
|  | 278 | +    // hStoreFiles.get(3) is a cold data file, while hStoreFiles.get(0) is a hot file. | 
|  | 279 | +    Set<BlockCacheKey> cacheKeys = new HashSet<>(); | 
|  | 280 | +    cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 0, true, BlockType.DATA)); | 
|  | 281 | +    cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 8192, true, BlockType.DATA)); | 
|  | 282 | +    cacheKeys.add(new BlockCacheKey(hStoreFiles.get(0).getPath(), 0, true, BlockType.DATA)); | 
|  | 283 | + | 
|  | 284 | +    // Create dummy data to be cached and fill the cache completely. | 
|  | 285 | +    CacheTestUtils.HFileBlockPair[] blocks = CacheTestUtils.generateHFileBlocks(8192, 3); | 
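|  |  | +    // generateHFileBlocks(blockSize, numBlocks) creates dummy HFile blocks of the given size. | 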
|  | 286 | + | 
|  | 287 | +    int blocksIter = 0; | 
|  | 288 | +    for (BlockCacheKey key : cacheKeys) { | 
|  | 289 | +      bucketCache.cacheBlock(key, blocks[blocksIter++].getBlock()); | 
|  | 290 | +      // Ensure that the block is persisted to the file. | 
|  | 291 | +      Waiter.waitFor(defaultConf, 10000, 100, () -> (bucketCache.getBackingMap().containsKey(key))); | 
|  | 292 | +    } | 
|  | 293 | + | 
|  | 294 | +    // Verify that the bucket cache contains 3 blocks. | 
|  | 295 | +    assertEquals(3, bucketCache.getBackingMap().keySet().size()); | 
|  | 296 | + | 
|  | 297 | +    // Add an additional hot data block to the cache, which should trigger an eviction. | 
|  | 298 | +    BlockCacheKey newKey = new BlockCacheKey(hStoreFiles.get(2).getPath(), 0, true, BlockType.DATA); | 
|  | 299 | +    CacheTestUtils.HFileBlockPair[] newBlock = CacheTestUtils.generateHFileBlocks(8192, 1); | 
|  | 300 | + | 
|  | 301 | +    bucketCache.cacheBlock(newKey, newBlock[0].getBlock()); | 
|  | 302 | +    Waiter.waitFor(defaultConf, 10000, 100, | 
|  | 303 | +      () -> (bucketCache.getBackingMap().containsKey(newKey))); | 
|  | 304 | + | 
|  | 305 | +    // Verify that the bucket cache now contains only the 2 hot blocks. | 
|  | 306 | +    // Both 8KB cold blocks are evicted to make room for the new 8KB block plus some | 
|  | 307 | +    // additional free space. | 
|  | 308 | +    validateBlocks(bucketCache.getBackingMap().keySet(), 2, 2, 0); | 
|  | 309 | +  } | 
|  | 310 | + | 
|  | 311 | +  /* | 
|  | 312 | +   * Verify that two of the three cold blocks are evicted when the bucket cache reaches its | 
|  | 313 | +   * capacity, while one cold block remains since the required space has been freed. | 
|  | 314 | +   */ | 
|  | 315 | +  @Test | 
|  | 316 | +  public void testBlockEvictionsAllColdBlocks() throws Exception { | 
|  | 317 | +    long capacitySize = 40 * 1024; | 
|  | 318 | +    int writeThreads = 3; | 
|  | 319 | +    int writerQLen = 64; | 
|  | 320 | +    int[] bucketSizes = new int[] { 8 * 1024 + 1024 }; | 
|  | 321 | + | 
|  | 322 | +    // Setup: Create a bucket cache with lower capacity | 
|  | 323 | +    BucketCache bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, | 
|  | 324 | +      8192, bucketSizes, writeThreads, writerQLen, testDir + "/bucket.persistence", | 
|  | 325 | +      DEFAULT_ERROR_TOLERATION_DURATION, defaultConf); | 
|  | 326 | + | 
|  | 327 | +    // Create three cache keys, all referring to cold data blocks. | 
|  | 328 | +    // hStoreFiles.get(3) is a cold data file. | 
|  | 329 | +    Set<BlockCacheKey> cacheKeys = new HashSet<>(); | 
|  | 330 | +    cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 0, true, BlockType.DATA)); | 
|  | 331 | +    cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 8192, true, BlockType.DATA)); | 
|  | 332 | +    cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 16384, true, BlockType.DATA)); | 
|  | 333 | + | 
|  | 334 | +    // Create dummy data to be cached and fill the cache completely. | 
|  | 335 | +    CacheTestUtils.HFileBlockPair[] blocks = CacheTestUtils.generateHFileBlocks(8192, 3); | 
|  | 336 | + | 
|  | 337 | +    int blocksIter = 0; | 
|  | 338 | +    for (BlockCacheKey key : cacheKeys) { | 
|  | 339 | +      bucketCache.cacheBlock(key, blocks[blocksIter++].getBlock()); | 
|  | 340 | +      // Ensure that the block is persisted to the file. | 
|  | 341 | +      Waiter.waitFor(defaultConf, 10000, 100, () -> (bucketCache.getBackingMap().containsKey(key))); | 
|  | 342 | +    } | 
|  | 343 | + | 
|  | 344 | +    // Verify that the bucket cache contains 3 blocks. | 
|  | 345 | +    assertEquals(3, bucketCache.getBackingMap().keySet().size()); | 
|  | 346 | + | 
|  | 347 | +    // Add an additional hot data block to the cache, which should trigger an eviction. | 
|  | 348 | +    BlockCacheKey newKey = new BlockCacheKey(hStoreFiles.get(2).getPath(), 0, true, BlockType.DATA); | 
|  | 349 | +    CacheTestUtils.HFileBlockPair[] newBlock = CacheTestUtils.generateHFileBlocks(8192, 1); | 
|  | 350 | + | 
|  | 351 | +    bucketCache.cacheBlock(newKey, newBlock[0].getBlock()); | 
|  | 352 | +    Waiter.waitFor(defaultConf, 10000, 100, | 
|  | 353 | +      () -> (bucketCache.getBackingMap().containsKey(newKey))); | 
|  | 354 | + | 
|  | 355 | +    // Verify that the bucket cache now contains 1 cold block and the newly added hot block. | 
|  | 356 | +    validateBlocks(bucketCache.getBackingMap().keySet(), 2, 1, 1); | 
|  | 357 | +  } | 
|  | 358 | + | 
|  | 359 | +  /* | 
|  | 360 | +   * Verify that a hot block is evicted along with a cold block when the bucket cache reaches its capacity. | 
|  | 361 | +   */ | 
|  | 362 | +  @Test | 
|  | 363 | +  public void testBlockEvictionsHotBlocks() throws Exception { | 
|  | 364 | +    long capacitySize = 40 * 1024; | 
|  | 365 | +    int writeThreads = 3; | 
|  | 366 | +    int writerQLen = 64; | 
|  | 367 | +    int[] bucketSizes = new int[] { 8 * 1024 + 1024 }; | 
|  | 368 | + | 
|  | 369 | +    // Setup: Create a bucket cache with lower capacity | 
|  | 370 | +    BucketCache bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, | 
|  | 371 | +      8192, bucketSizes, writeThreads, writerQLen, testDir + "/bucket.persistence", | 
|  | 372 | +      DEFAULT_ERROR_TOLERATION_DURATION, defaultConf); | 
|  | 373 | + | 
|  | 374 | +    // Create two cache keys for hot data blocks and one for a cold data block. | 
|  | 375 | +    // hStoreFiles.get(0) is a hot data file and hStoreFiles.get(3) is a cold data file. | 
|  | 376 | +    Set<BlockCacheKey> cacheKeys = new HashSet<>(); | 
|  | 377 | +    cacheKeys.add(new BlockCacheKey(hStoreFiles.get(0).getPath(), 0, true, BlockType.DATA)); | 
|  | 378 | +    cacheKeys.add(new BlockCacheKey(hStoreFiles.get(0).getPath(), 8192, true, BlockType.DATA)); | 
|  | 379 | +    cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 0, true, BlockType.DATA)); | 
|  | 380 | + | 
|  | 381 | +    // Create dummy data to be cached and fill the cache completely. | 
|  | 382 | +    CacheTestUtils.HFileBlockPair[] blocks = CacheTestUtils.generateHFileBlocks(8192, 3); | 
|  | 383 | + | 
|  | 384 | +    int blocksIter = 0; | 
|  | 385 | +    for (BlockCacheKey key : cacheKeys) { | 
|  | 386 | +      bucketCache.cacheBlock(key, blocks[blocksIter++].getBlock()); | 
|  | 387 | +      // Ensure that the block is persisted to the file. | 
|  | 388 | +      Waiter.waitFor(defaultConf, 10000, 100, () -> (bucketCache.getBackingMap().containsKey(key))); | 
|  | 389 | +    } | 
|  | 390 | + | 
|  | 391 | +    // Verify that the bucket cache contains 3 blocks. | 
|  | 392 | +    assertEquals(3, bucketCache.getBackingMap().keySet().size()); | 
|  | 393 | + | 
|  | 394 | +    // Add an additional hot block, which should evict the only cold block together with one of the older hot blocks. | 
|  | 395 | +    BlockCacheKey newKey = new BlockCacheKey(hStoreFiles.get(2).getPath(), 0, true, BlockType.DATA); | 
|  | 396 | +    CacheTestUtils.HFileBlockPair[] newBlock = CacheTestUtils.generateHFileBlocks(8192, 1); | 
|  | 397 | + | 
|  | 398 | +    bucketCache.cacheBlock(newKey, newBlock[0].getBlock()); | 
|  | 399 | +    Waiter.waitFor(defaultConf, 10000, 100, | 
|  | 400 | +      () -> (bucketCache.getBackingMap().containsKey(newKey))); | 
|  | 401 | + | 
|  | 402 | +    // Verify that the bucket cache now contains 2 hot blocks. | 
|  | 403 | +    // Only one of the older hot blocks is retained; the other is the newly added hot block. | 
|  | 404 | +    validateBlocks(bucketCache.getBackingMap().keySet(), 2, 2, 0); | 
|  | 405 | +  } | 
|  | 406 | + | 
|  | 407 | +  private void validateBlocks(Set<BlockCacheKey> keys, int expectedTotalKeys, int expectedHotBlocks, | 
|  | 408 | +    int expectedColdBlocks) { | 
|  | 409 | +    int numHotBlocks = 0, numColdBlocks = 0; | 
|  | 410 | + | 
|  | 411 | +    assertEquals(expectedTotalKeys, keys.size()); | 
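|  |  | +    // Classify each cached key as hot or cold using the DataTieringManager. | 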
|  | 413 | +    for (BlockCacheKey key : keys) { | 
|  | 414 | +      try { | 
|  | 415 | +        if (dataTieringManager.isHotData(key)) { | 
|  | 416 | +          numHotBlocks++; | 
|  | 417 | +        } else { | 
|  | 418 | +          numColdBlocks++; | 
|  | 419 | +        } | 
|  | 420 | +      } catch (Exception e) { | 
|  | 421 | +        fail("Unexpected exception: " + e.getMessage()); | 
|  | 422 | +      } | 
|  | 423 | +    } | 
|  | 424 | +    assertEquals(expectedHotBlocks, numHotBlocks); | 
|  | 425 | +    assertEquals(expectedColdBlocks, numColdBlocks); | 
|  | 426 | +  } | 
|  | 427 | + | 
| 250 | 428 |   private void testDataTieringMethodWithPath(DataTieringMethodCallerWithPath caller, Path path, | 
| 251 | 429 |     boolean expectedResult, DataTieringException exception) { | 
| 252 | 430 |     try { | 