From 501c8277623b8fc5b4bd4b227301a0d887b7d3d1 Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Mon, 15 Jul 2019 17:20:01 -0700 Subject: [PATCH 1/9] HDDS-1802. Add Eviction policy for table cache. --- .../org/apache/hadoop/utils/db/DBStore.java | 14 +++- .../org/apache/hadoop/utils/db/RDBStore.java | 9 +++ .../apache/hadoop/utils/db/TypedTable.java | 75 ++++++++++++++++--- ...ialTableCache.java => TableCacheImpl.java} | 31 ++++++-- ...ableCache.java => TestTableCacheImpl.java} | 71 +++++++++++++----- .../ozone/om/OmMetadataManagerImpl.java | 7 +- 6 files changed, 168 insertions(+), 39 deletions(-) rename hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/{PartialTableCache.java => TableCacheImpl.java} (77%) rename hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/cache/{TestPartialTableCache.java => TestTableCacheImpl.java} (66%) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java index d01dfe44d30c4..95e57d9d7216d 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java @@ -25,6 +25,7 @@ import java.util.Map; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.utils.db.cache.TableCacheImpl; /** * The DBStore interface provides the ability to create Tables, which store @@ -47,7 +48,9 @@ public interface DBStore extends AutoCloseable { /** - * Gets an existing TableStore with implicit key/value conversion. + * Gets an existing TableStore with implicit key/value conversion and + * with default cleanup policy for cache. Default policy is cache cleanup + * after flush to DB is completed. * * @param name - Name of the TableStore to get * @param keyType @@ -58,6 +61,15 @@ public interface DBStore extends AutoCloseable { Table getTable(String name, Class keyType, Class valueType) throws IOException; + /** + * Gets an existing TableStore with implicit key/value conversion and + * with specified cleanup policy for cache. + * @throws IOException + */ + Table getTable(String name, + Class keyType, Class valueType, + TableCacheImpl.CacheCleanupPolicy cleanupPolicy) throws IOException; + /** * Lists the Known list of Tables in a DB. * diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStore.java index 27862c7847653..23c03f1884b7c 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStore.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStore.java @@ -38,6 +38,7 @@ import org.apache.hadoop.utils.RocksDBStoreMBean; import com.google.common.base.Preconditions; +import org.apache.hadoop.utils.db.cache.TableCacheImpl; import org.apache.ratis.thirdparty.com.google.common.annotations.VisibleForTesting; import org.rocksdb.ColumnFamilyDescriptor; import org.rocksdb.ColumnFamilyHandle; @@ -260,6 +261,14 @@ public Table getTable(String name, valueType); } + @Override + public Table getTable(String name, + Class keyType, Class valueType, + TableCacheImpl.CacheCleanupPolicy cleanupPolicy) throws IOException { + return new TypedTable(getTable(name), codecRegistry, keyType, + valueType, cleanupPolicy); + } + @Override public ArrayList listTables() throws IOException { ArrayList
returnList = new ArrayList<>(); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java index 05f73b847276d..3af498ca5c5de 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java @@ -23,9 +23,10 @@ import java.util.Map; import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Optional; import org.apache.hadoop.utils.db.cache.CacheKey; import org.apache.hadoop.utils.db.cache.CacheValue; -import org.apache.hadoop.utils.db.cache.PartialTableCache; +import org.apache.hadoop.utils.db.cache.TableCacheImpl; import org.apache.hadoop.utils.db.cache.TableCache; /** @@ -49,7 +50,17 @@ public class TypedTable implements Table { private final TableCache, CacheValue> cache; + private final TableCacheImpl.CacheCleanupPolicy cacheCleanupPolicy; + + /** + * Create an TypedTable from the raw table. + * Default cleanup policy used for the table is cleanup after flush. + * @param rawTable + * @param codecRegistry + * @param keyType + * @param valueType + */ public TypedTable( Table rawTable, CodecRegistry codecRegistry, Class keyType, @@ -58,7 +69,29 @@ public TypedTable( this.codecRegistry = codecRegistry; this.keyType = keyType; this.valueType = valueType; - cache = new PartialTableCache<>(); + this.cacheCleanupPolicy = TableCacheImpl.CacheCleanupPolicy.AFTERFLUSH; + cache = new TableCacheImpl<>(cacheCleanupPolicy); + } + + /** + * Create an TypedTable from the raw table with specified cleanup policy + * for table cache. + * @param rawTable + * @param codecRegistry + * @param keyType + * @param valueType + * @param cleanupPolicy + */ + public TypedTable( + Table rawTable, + CodecRegistry codecRegistry, Class keyType, + Class valueType, TableCacheImpl.CacheCleanupPolicy cleanupPolicy) { + this.rawTable = rawTable; + this.codecRegistry = codecRegistry; + this.keyType = keyType; + this.valueType = valueType; + this.cacheCleanupPolicy = cleanupPolicy; + cache = new TableCacheImpl<>(cacheCleanupPolicy); } @Override @@ -83,11 +116,22 @@ public boolean isEmpty() throws IOException { @Override public boolean isExist(KEY key) throws IOException { - CacheValue cacheValue= cache.get(new CacheKey<>(key)); - return (cacheValue != null && cacheValue.getCacheValue() != null) || + + if (cacheCleanupPolicy == TableCacheImpl.CacheCleanupPolicy.NEVER) { + return isExistForCleanupPolicyNever(key); + } + + return isExistForCleanupPolicyNever(key) || rawTable.isExist(codecRegistry.asRawData(key)); } + private boolean isExistForCleanupPolicyNever(KEY key) { + // If cache cleanup policy is NEVER, entire table is cached. So, no need + // to check from DB. + CacheValue cacheValue= cache.get(new CacheKey<>(key)); + return (cacheValue != null && cacheValue.getCacheValue() != null); + } + /** * Returns the value mapped to the given key in byte array or returns null * if the key is not found. @@ -104,14 +148,25 @@ public boolean isExist(KEY key) throws IOException { public VALUE get(KEY key) throws IOException { // Here the metadata lock will guarantee that cache is not updated for same // key during get key. - CacheValue< VALUE > cacheValue = cache.get(new CacheKey<>(key)); - if (cacheValue == null) { - // If no cache for the table or if it does not exist in cache get from - // RocksDB table. + + // First get from cache. If it has return value. 
+ // If it does not have + // If cache cleanup policy is NEVER return null. Because cache here is + // full table data in-memory, so no need to get from underlying rocksdb + // table. + // If cache cleanup policy is AFTERFLUSH return from underlying rocksdb + // table. As it might have been cleaned up from cache, might be there in + // DB. + CacheValue cacheValue = + Optional.fromNullable(cache.get(new CacheKey<>(key))).orNull(); + if (cacheValue != null) { + return cacheValue.getCacheValue(); + } + + if (cacheCleanupPolicy == TableCacheImpl.CacheCleanupPolicy.AFTERFLUSH) { return getFromTable(key); } else { - // We have a value in cache, return the value. - return cacheValue.getCacheValue(); + return null; } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/PartialTableCache.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCacheImpl.java similarity index 77% rename from hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/PartialTableCache.java rename to hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCacheImpl.java index 7d16d04df194a..5e14e2ab407ad 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/PartialTableCache.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCacheImpl.java @@ -32,21 +32,28 @@ import org.apache.hadoop.classification.InterfaceStability.Evolving; /** - * Cache implementation for the table, this cache is partial cache, this will - * be cleaned up, after entries are flushed to DB. + * Cache implementation for the table. Depending on the cache clean up policy + * this cache will be full cache or partial cache. + * + * If cache cleanup policy is set as {@link CacheCleanupPolicy#AFTERFLUSH}, + * this will be a partial cache. + * + * If cache cleanup policy is set as {@link CacheCleanupPolicy#NEVER}, + * this will be a full cache. */ @Private @Evolving -public class PartialTableCache implements TableCache { private final ConcurrentHashMap cache; private final TreeSet> epochEntries; private ExecutorService executorService; + private CacheCleanupPolicy cleanupPolicy; - public PartialTableCache() { + public TableCacheImpl(CacheCleanupPolicy cleanupPolicy) { cache = new ConcurrentHashMap<>(); epochEntries = new TreeSet<>(); // Created a singleThreadExecutor, so one cleanup will be running at a @@ -54,7 +61,7 @@ public PartialTableCache() { ThreadFactory build = new ThreadFactoryBuilder().setDaemon(true) .setNameFormat("PartialTableCache Cleanup Thread - %d").build(); executorService = Executors.newSingleThreadExecutor(build); - + this.cleanupPolicy = cleanupPolicy; } @Override @@ -70,7 +77,10 @@ public void put(CACHEKEY cacheKey, CACHEVALUE value) { @Override public void cleanup(long epoch) { - executorService.submit(() -> evictCache(epoch)); + // If it is never do nothing. + if (cleanupPolicy == CacheCleanupPolicy.AFTERFLUSH) { + executorService.submit(() -> evictCache(epoch)); + } } @Override @@ -103,4 +113,13 @@ private void evictCache(long epoch) { } } } + + /** + * Cleanup policies for table cache. + */ + public enum CacheCleanupPolicy { + NEVER, // Cache will not be cleaned up. This mean's the table maintains + // full cache. + AFTERFLUSH // Cache will be cleaned up, once after flushing to DB. 
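+    // Example of the intended flow (illustrative note, not part of the
+    // original change): the double buffer flushes a batch tagged with
+    // epoch N and then calls cleanup(N), which under AFTERFLUSH evicts
+    // every entry whose epoch is <= N; under NEVER, cleanup(N) is a
+    // no-op and the cache keeps mirroring the full table.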
+ } } diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/cache/TestPartialTableCache.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/cache/TestTableCacheImpl.java similarity index 66% rename from hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/cache/TestPartialTableCache.java rename to hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/cache/TestTableCacheImpl.java index 736ae1b6161b5..8aeb5f78abb7d 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/cache/TestPartialTableCache.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/cache/TestTableCacheImpl.java @@ -19,6 +19,8 @@ package org.apache.hadoop.utils.db.cache; +import java.util.Arrays; +import java.util.Collection; import java.util.concurrent.CompletableFuture; import com.google.common.base.Optional; @@ -26,18 +28,40 @@ import org.junit.Assert; import org.junit.Before; import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; import static org.junit.Assert.fail; /** * Class tests partial table cache. */ -public class TestPartialTableCache { +@RunWith(value = Parameterized.class) +public class TestTableCacheImpl { private TableCache, CacheValue> tableCache; + private final TableCacheImpl.CacheCleanupPolicy cacheCleanupPolicy; + + + @Parameterized.Parameters + public static Collection policy() { + Object[][] params = new Object[][] { + {TableCacheImpl.CacheCleanupPolicy.NEVER}, + {TableCacheImpl.CacheCleanupPolicy.AFTERFLUSH} + }; + return Arrays.asList(params); + } + + public TestTableCacheImpl( + TableCacheImpl.CacheCleanupPolicy cacheCleanupPolicy) { + this.cacheCleanupPolicy = cacheCleanupPolicy; + } + + @Before public void create() { - tableCache = new PartialTableCache<>(); + tableCache = + new TableCacheImpl<>(cacheCleanupPolicy); } @Test public void testPartialTableCache() { @@ -98,33 +122,40 @@ public void testPartialTableCacheParallel() throws Exception { tableCache.get(new CacheKey<>(Integer.toString(i))).getCacheValue()); } - int deleted = 5; - // cleanup first 5 entires - tableCache.cleanup(deleted); value = future.get(); Assert.assertEquals(10, value); totalCount += value; - // We should totalCount - deleted entries in cache. - final int tc = totalCount; - GenericTestUtils.waitFor(() -> (tc - deleted == tableCache.size()), 100, - 5000); - - // Check if we have remaining entries. - for (int i=6; i <= totalCount; i++) { - Assert.assertEquals(Integer.toString(i), - tableCache.get(new CacheKey<>(Integer.toString(i))).getCacheValue()); + if (cacheCleanupPolicy == TableCacheImpl.CacheCleanupPolicy.AFTERFLUSH) { + int deleted = 5; + + // cleanup first 5 entires + tableCache.cleanup(deleted); + + // We should totalCount - deleted entries in cache. + final int tc = totalCount; + GenericTestUtils.waitFor(() -> (tc - deleted == tableCache.size()), 100, + 5000); + // Check if we have remaining entries. + for (int i=6; i <= totalCount; i++) { + Assert.assertEquals(Integer.toString(i), tableCache.get( + new CacheKey<>(Integer.toString(i))).getCacheValue()); + } + tableCache.cleanup(10); + + tableCache.cleanup(totalCount); + + // Cleaned up all entries, so cache size should be zero. + GenericTestUtils.waitFor(() -> (0 == tableCache.size()), 100, + 5000); + } else { + tableCache.cleanup(totalCount); + Assert.assertEquals(totalCount, tableCache.size()); } - tableCache.cleanup(10); - - tableCache.cleanup(totalCount); - // Cleaned up all entries, so cache size should be zero. 
- GenericTestUtils.waitFor(() -> (0 == tableCache.size()), 100, - 5000); } private int writeToCache(int count, int startVal, long sleep) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java index 88fd24cf42cb3..c7d6bb422bbf0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java @@ -69,6 +69,7 @@ import org.apache.hadoop.utils.db.TypedTable; import org.apache.hadoop.utils.db.cache.CacheKey; import org.apache.hadoop.utils.db.cache.CacheValue; +import org.apache.hadoop.utils.db.cache.TableCacheImpl; import org.eclipse.jetty.util.StringUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -269,11 +270,13 @@ protected void initializeOmTables() throws IOException { this.store.getTable(USER_TABLE, String.class, VolumeList.class); checkTableStatus(userTable, USER_TABLE); volumeTable = - this.store.getTable(VOLUME_TABLE, String.class, OmVolumeArgs.class); + this.store.getTable(VOLUME_TABLE, String.class, OmVolumeArgs.class, + TableCacheImpl.CacheCleanupPolicy.NEVER); checkTableStatus(volumeTable, VOLUME_TABLE); bucketTable = - this.store.getTable(BUCKET_TABLE, String.class, OmBucketInfo.class); + this.store.getTable(BUCKET_TABLE, String.class, OmBucketInfo.class, + TableCacheImpl.CacheCleanupPolicy.NEVER); checkTableStatus(bucketTable, BUCKET_TABLE); From 7e0aeac79d7bfdd46abe8ae2bf2d85f7b0524a89 Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Mon, 15 Jul 2019 17:22:01 -0700 Subject: [PATCH 2/9] rename AfterFlush --- .../main/java/org/apache/hadoop/utils/db/TypedTable.java | 6 +++--- .../org/apache/hadoop/utils/db/cache/TableCacheImpl.java | 6 +++--- .../apache/hadoop/utils/db/cache/TestTableCacheImpl.java | 4 ++-- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java index 3af498ca5c5de..76438a05f66e0 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java @@ -69,7 +69,7 @@ public TypedTable( this.codecRegistry = codecRegistry; this.keyType = keyType; this.valueType = valueType; - this.cacheCleanupPolicy = TableCacheImpl.CacheCleanupPolicy.AFTERFLUSH; + this.cacheCleanupPolicy = TableCacheImpl.CacheCleanupPolicy.AFTER_FLUSH; cache = new TableCacheImpl<>(cacheCleanupPolicy); } @@ -154,7 +154,7 @@ public VALUE get(KEY key) throws IOException { // If cache cleanup policy is NEVER return null. Because cache here is // full table data in-memory, so no need to get from underlying rocksdb // table. - // If cache cleanup policy is AFTERFLUSH return from underlying rocksdb + // If cache cleanup policy is AFTER_FLUSH return from underlying rocksdb // table. As it might have been cleaned up from cache, might be there in // DB. 
CacheValue cacheValue = @@ -163,7 +163,7 @@ public VALUE get(KEY key) throws IOException { return cacheValue.getCacheValue(); } - if (cacheCleanupPolicy == TableCacheImpl.CacheCleanupPolicy.AFTERFLUSH) { + if (cacheCleanupPolicy == TableCacheImpl.CacheCleanupPolicy.AFTER_FLUSH) { return getFromTable(key); } else { return null; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCacheImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCacheImpl.java index 5e14e2ab407ad..d6c1bb9f92283 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCacheImpl.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCacheImpl.java @@ -35,7 +35,7 @@ * Cache implementation for the table. Depending on the cache clean up policy * this cache will be full cache or partial cache. * - * If cache cleanup policy is set as {@link CacheCleanupPolicy#AFTERFLUSH}, + * If cache cleanup policy is set as {@link CacheCleanupPolicy#AFTER_FLUSH}, * this will be a partial cache. * * If cache cleanup policy is set as {@link CacheCleanupPolicy#NEVER}, @@ -78,7 +78,7 @@ public void put(CACHEKEY cacheKey, CACHEVALUE value) { @Override public void cleanup(long epoch) { // If it is never do nothing. - if (cleanupPolicy == CacheCleanupPolicy.AFTERFLUSH) { + if (cleanupPolicy == CacheCleanupPolicy.AFTER_FLUSH) { executorService.submit(() -> evictCache(epoch)); } } @@ -120,6 +120,6 @@ private void evictCache(long epoch) { public enum CacheCleanupPolicy { NEVER, // Cache will not be cleaned up. This mean's the table maintains // full cache. - AFTERFLUSH // Cache will be cleaned up, once after flushing to DB. + AFTER_FLUSH // Cache will be cleaned up, once after flushing to DB. } } diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/cache/TestTableCacheImpl.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/cache/TestTableCacheImpl.java index 8aeb5f78abb7d..e8dc8154afdf5 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/cache/TestTableCacheImpl.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/cache/TestTableCacheImpl.java @@ -47,7 +47,7 @@ public class TestTableCacheImpl { public static Collection policy() { Object[][] params = new Object[][] { {TableCacheImpl.CacheCleanupPolicy.NEVER}, - {TableCacheImpl.CacheCleanupPolicy.AFTERFLUSH} + {TableCacheImpl.CacheCleanupPolicy.AFTER_FLUSH} }; return Arrays.asList(params); } @@ -128,7 +128,7 @@ public void testPartialTableCacheParallel() throws Exception { totalCount += value; - if (cacheCleanupPolicy == TableCacheImpl.CacheCleanupPolicy.AFTERFLUSH) { + if (cacheCleanupPolicy == TableCacheImpl.CacheCleanupPolicy.AFTER_FLUSH) { int deleted = 5; // cleanup first 5 entires From 53bf346bbe422ca9fd643f6db4995f748a30442d Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Tue, 16 Jul 2019 18:22:12 -0700 Subject: [PATCH 3/9] fixing review comments and loading up cache after restart. 
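
The lookup contract also changes in this patch: instead of handing back a
raw CacheValue, the cache answers with a CacheResult whose status tells
the caller whether the answer is authoritative. A minimal caller sketch,
mirroring what the reworked TypedTable#get below does ("key" and the
VALUE type parameter are illustrative):

    CacheResult<CacheValue<VALUE>> result =
        cache.isExist(new CacheKey<>(key));
    switch (result.getCacheStatus()) {
    case EXISTS:        // served from cache; value can be returned as-is
      return result.getValue().getCacheValue();
    case NOT_EXIST:     // full cache (NEVER policy), so absence is final
      return null;
    default:            // CHECK_IN_TABLE: partial cache, consult RocksDB
      return getFromTable(key);
    }
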
--- .../apache/hadoop/utils/db/TypedTable.java | 78 ++++++++++--------- .../hadoop/utils/db/cache/CacheResult.java | 58 ++++++++++++++ .../hadoop/utils/db/cache/TableCache.java | 31 +++++++- .../hadoop/utils/db/cache/TableCacheImpl.java | 60 +++++++++++--- .../utils/db/cache/TestTableCacheImpl.java | 4 +- 5 files changed, 180 insertions(+), 51 deletions(-) create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/CacheResult.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java index 76438a05f66e0..99ebd5b372abb 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java @@ -25,10 +25,15 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Optional; import org.apache.hadoop.utils.db.cache.CacheKey; +import org.apache.hadoop.utils.db.cache.CacheResult; import org.apache.hadoop.utils.db.cache.CacheValue; import org.apache.hadoop.utils.db.cache.TableCacheImpl; import org.apache.hadoop.utils.db.cache.TableCache; +import org.apache.hadoop.utils.db.cache.TableCacheImpl.CacheCleanupPolicy; +import static org.apache.hadoop.utils.db.cache.CacheResult.CacheStatus.EXISTS; +import static org.apache.hadoop.utils.db.cache.CacheResult.CacheStatus.NOT_EXIST; +import static org.apache.hadoop.utils.db.cache.CacheResult.CacheStatus.CHECK_IN_TABLE; /** * Strongly typed table implementation. *
@@ -50,12 +55,12 @@ public class TypedTable implements Table { private final TableCache, CacheValue> cache; - private final TableCacheImpl.CacheCleanupPolicy cacheCleanupPolicy; - + private final static long EPOCH_DEFAULT = -1L; /** * Create an TypedTable from the raw table. - * Default cleanup policy used for the table is cleanup after flush. + * Default cleanup policy used for the table is + * {@link CacheCleanupPolicy#MANUAL}. * @param rawTable * @param codecRegistry * @param keyType @@ -69,8 +74,7 @@ public TypedTable( this.codecRegistry = codecRegistry; this.keyType = keyType; this.valueType = valueType; - this.cacheCleanupPolicy = TableCacheImpl.CacheCleanupPolicy.AFTER_FLUSH; - cache = new TableCacheImpl<>(cacheCleanupPolicy); + cache = new TableCacheImpl<>(TableCacheImpl.CacheCleanupPolicy.MANUAL); } /** @@ -85,13 +89,27 @@ public TypedTable( public TypedTable( Table rawTable, CodecRegistry codecRegistry, Class keyType, - Class valueType, TableCacheImpl.CacheCleanupPolicy cleanupPolicy) { + Class valueType, + TableCacheImpl.CacheCleanupPolicy cleanupPolicy) throws IOException { this.rawTable = rawTable; this.codecRegistry = codecRegistry; this.keyType = keyType; this.valueType = valueType; - this.cacheCleanupPolicy = cleanupPolicy; - cache = new TableCacheImpl<>(cacheCleanupPolicy); + cache = new TableCacheImpl<>(cleanupPolicy); + + if (cleanupPolicy == CacheCleanupPolicy.NEVER) { + //fill cache + try(TableIterator> tableIterator = + iterator()) { + KeyValue kv = tableIterator.next(); + + // We should build cache after OM restart when clean up policy is NEVER. + // Setting epoch value -1, so that when it is marked for delete, this + // will be considered for cleanup. + cache.put(new CacheKey<>(kv.getKey()), + new CacheValue<>(Optional.of(kv.getValue()), EPOCH_DEFAULT)); + } + } } @Override @@ -117,19 +135,16 @@ public boolean isEmpty() throws IOException { @Override public boolean isExist(KEY key) throws IOException { - if (cacheCleanupPolicy == TableCacheImpl.CacheCleanupPolicy.NEVER) { - return isExistForCleanupPolicyNever(key); - } + CacheResult> cacheResult = + cache.isExist(new CacheKey<>(key)); - return isExistForCleanupPolicyNever(key) || - rawTable.isExist(codecRegistry.asRawData(key)); - } - - private boolean isExistForCleanupPolicyNever(KEY key) { - // If cache cleanup policy is NEVER, entire table is cached. So, no need - // to check from DB. - CacheValue cacheValue= cache.get(new CacheKey<>(key)); - return (cacheValue != null && cacheValue.getCacheValue() != null); + if (cacheResult.getCacheStatus() == EXISTS) { + return true; + } else if (cacheResult.getCacheStatus() == NOT_EXIST) { + return false; + } else { + return rawTable.isExist(codecRegistry.asRawData(key)); + } } /** @@ -149,24 +164,15 @@ public VALUE get(KEY key) throws IOException { // Here the metadata lock will guarantee that cache is not updated for same // key during get key. - // First get from cache. If it has return value. - // If it does not have - // If cache cleanup policy is NEVER return null. Because cache here is - // full table data in-memory, so no need to get from underlying rocksdb - // table. - // If cache cleanup policy is AFTER_FLUSH return from underlying rocksdb - // table. As it might have been cleaned up from cache, might be there in - // DB. 
- CacheValue cacheValue = - Optional.fromNullable(cache.get(new CacheKey<>(key))).orNull(); - if (cacheValue != null) { - return cacheValue.getCacheValue(); - } + CacheResult> cacheResult = + cache.isExist(new CacheKey<>(key)); - if (cacheCleanupPolicy == TableCacheImpl.CacheCleanupPolicy.AFTER_FLUSH) { - return getFromTable(key); - } else { + if (cacheResult.getCacheStatus() == EXISTS) { + return cacheResult.getValue().getCacheValue(); + } else if (cacheResult.getCacheStatus() == NOT_EXIST) { return null; + } else { + return getFromTable(key); } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/CacheResult.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/CacheResult.java new file mode 100644 index 0000000000000..1c54f11a3bf6c --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/CacheResult.java @@ -0,0 +1,58 @@ +package org.apache.hadoop.utils.db.cache; + +import java.util.Objects; + +/** + * CacheResult which is returned as response for Key exist in cache or not. + * @param + */ +public class CacheResult { + + private CacheStatus cacheStatus; + private CACHEVALUE cachevalue; + + public CacheResult(CacheStatus cacheStatus, CACHEVALUE cachevalue) { + this.cacheStatus = cacheStatus; + this.cachevalue = cachevalue; + } + + public CacheStatus getCacheStatus() { + return cacheStatus; + } + + public CACHEVALUE getValue() { + return cachevalue; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + CacheResult< ? > that = (CacheResult< ? >) o; + return cacheStatus == that.cacheStatus && + Objects.equals(cachevalue, that.cachevalue); + } + + @Override + public int hashCode() { + return Objects.hash(cacheStatus, cachevalue); + } + + /** + * Status which tells whether key exists in cache or not. + */ + public enum CacheStatus { + EXISTS, // When key exists in cache. + + NOT_EXIST, // We guarantee that it does not exist. This will be returned + // when the key does not exist in cache, when cache clean up policy is + // NEVER. + CHECK_IN_TABLE // This will be returned when the key does not exist in + // cache, when cache clean up policy is MANUAL. So caller need to check + // if it might exist in it's rocksdb table. + } +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCache.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCache.java index 0536ed7061557..ed37d411303a5 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCache.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCache.java @@ -21,7 +21,8 @@ import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Evolving; - +import org.apache.hadoop.utils.db.cache.CacheResult.CacheStatus; +import org.apache.hadoop.utils.db.cache.TableCacheImpl.CacheCleanupPolicy; import java.util.Iterator; import java.util.Map; @@ -52,8 +53,11 @@ public interface TableCache> iterator(); + + /** + * Check key exist in cache or not. + * + * If it exists return CacheResult with value and status as + * {@link CacheStatus#EXISTS} + * + * If it does not exist: + * If cache clean up policy is + * {@link TableCacheImpl.CacheCleanupPolicy#NEVER} it means table cache is + * full cache. It return's {@link CacheResult} with null + * and status as {@link CacheStatus#NOT_EXIST}. 
+ * + * If cache clean up policy is {@link CacheCleanupPolicy#MANUAL} it means + * table cache is partial cache. It return's {@link CacheResult} with + * null and status as CHECK_IN_TABLE. + * + * @param cachekey + */ + CacheResult isExist(CACHEKEY cachekey); + } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCacheImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCacheImpl.java index d6c1bb9f92283..f61bb0d81f68a 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCacheImpl.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCacheImpl.java @@ -35,7 +35,7 @@ * Cache implementation for the table. Depending on the cache clean up policy * this cache will be full cache or partial cache. * - * If cache cleanup policy is set as {@link CacheCleanupPolicy#AFTER_FLUSH}, + * If cache cleanup policy is set as {@link CacheCleanupPolicy#MANUAL}, * this will be a partial cache. * * If cache cleanup policy is set as {@link CacheCleanupPolicy#NEVER}, @@ -77,10 +77,7 @@ public void put(CACHEKEY cacheKey, CACHEVALUE value) { @Override public void cleanup(long epoch) { - // If it is never do nothing. - if (cleanupPolicy == CacheCleanupPolicy.AFTER_FLUSH) { - executorService.submit(() -> evictCache(epoch)); - } + executorService.submit(() -> evictCache(epoch, cleanupPolicy)); } @Override @@ -93,16 +90,24 @@ public Iterator> iterator() { return cache.entrySet().iterator(); } - private void evictCache(long epoch) { + private void evictCache(long epoch, CacheCleanupPolicy cacheCleanupPolicy) { EpochEntry currentEntry = null; for (Iterator> iterator = epochEntries.iterator(); iterator.hasNext();) { currentEntry = iterator.next(); CACHEKEY cachekey = currentEntry.getCachekey(); CacheValue cacheValue = cache.computeIfPresent(cachekey, ((k, v) -> { - if (v.getEpoch() <= epoch) { - iterator.remove(); - return null; + if (cleanupPolicy == CacheCleanupPolicy.MANUAL) { + if (v.getEpoch() <= epoch) { + iterator.remove(); + return null; + } + } else if (cleanupPolicy == CacheCleanupPolicy.NEVER) { + // Remove only entries which are marked for delete. + if (v.getEpoch() <= epoch && v.getCacheValue() == null) { + iterator.remove(); + return null; + } } return v; })); @@ -114,12 +119,47 @@ private void evictCache(long epoch) { } } + public CacheResult isExist(CACHEKEY cachekey) { + + // TODO: Remove this check once HA and Non-HA code is merged and all + // requests are converted to use cache and double buffer. + // This is to done as temporary instead of passing ratis enabled flag + // which requires more code changes. We cannot use ratis enabled flag + // also because some of the requests in OM HA are not modified to use + // double buffer and cache. + + if (cache.size() == 0) { + return new CacheResult<>(CacheResult.CacheStatus.CHECK_IN_TABLE, + null); + } + + CACHEVALUE cachevalue = cache.get(cachekey); + if (cachevalue == null) { + if (cleanupPolicy == CacheCleanupPolicy.NEVER) { + return new CacheResult<>(CacheResult.CacheStatus.NOT_EXIST, null); + } else { + return new CacheResult<>(CacheResult.CacheStatus.CHECK_IN_TABLE, + null); + } + } else { + if (cachevalue.getCacheValue() != null) { + return new CacheResult<>(CacheResult.CacheStatus.EXISTS, cachevalue); + } else { + // When entity is marked for delete, cacheValue will be set to null. + // In that case we can return NOT_EXIST irrespective of cache cleanup + // policy. 
+ return new CacheResult<>(CacheResult.CacheStatus.NOT_EXIST, null); + } + } + } + /** * Cleanup policies for table cache. */ public enum CacheCleanupPolicy { NEVER, // Cache will not be cleaned up. This mean's the table maintains // full cache. - AFTER_FLUSH // Cache will be cleaned up, once after flushing to DB. + MANUAL // Cache will be cleaned up, once after flushing to DB. It is + // caller's responsibility to flush to DB, before calling cleanup cache. } } diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/cache/TestTableCacheImpl.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/cache/TestTableCacheImpl.java index e8dc8154afdf5..c2ae8a1c81c56 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/cache/TestTableCacheImpl.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/cache/TestTableCacheImpl.java @@ -47,7 +47,7 @@ public class TestTableCacheImpl { public static Collection policy() { Object[][] params = new Object[][] { {TableCacheImpl.CacheCleanupPolicy.NEVER}, - {TableCacheImpl.CacheCleanupPolicy.AFTER_FLUSH} + {TableCacheImpl.CacheCleanupPolicy.MANUAL} }; return Arrays.asList(params); } @@ -128,7 +128,7 @@ public void testPartialTableCacheParallel() throws Exception { totalCount += value; - if (cacheCleanupPolicy == TableCacheImpl.CacheCleanupPolicy.AFTER_FLUSH) { + if (cacheCleanupPolicy == TableCacheImpl.CacheCleanupPolicy.MANUAL) { int deleted = 5; // cleanup first 5 entires From a42d2e421fcd09e04906a470c71fcbcc8adf700a Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Tue, 16 Jul 2019 18:24:15 -0700 Subject: [PATCH 4/9] fix unused import. --- .../src/main/java/org/apache/hadoop/utils/db/TypedTable.java | 1 - 1 file changed, 1 deletion(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java index 99ebd5b372abb..53ba1e44f8774 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java @@ -33,7 +33,6 @@ import static org.apache.hadoop.utils.db.cache.CacheResult.CacheStatus.EXISTS; import static org.apache.hadoop.utils.db.cache.CacheResult.CacheStatus.NOT_EXIST; -import static org.apache.hadoop.utils.db.cache.CacheResult.CacheStatus.CHECK_IN_TABLE; /** * Strongly typed table implementation. *
From 2dd7942e1fec8b6d85b8f22517fa3c39d5666c4c Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Tue, 16 Jul 2019 22:27:28 -0700 Subject: [PATCH 5/9] fix bug in code causing test failure. --- .../org/apache/hadoop/utils/db/TypedTable.java | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java index 53ba1e44f8774..5ca4251ae9df6 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java @@ -100,13 +100,16 @@ public TypedTable( //fill cache try(TableIterator> tableIterator = iterator()) { - KeyValue kv = tableIterator.next(); - // We should build cache after OM restart when clean up policy is NEVER. - // Setting epoch value -1, so that when it is marked for delete, this - // will be considered for cleanup. - cache.put(new CacheKey<>(kv.getKey()), - new CacheValue<>(Optional.of(kv.getValue()), EPOCH_DEFAULT)); + while (tableIterator.hasNext()) { + KeyValue< KEY, VALUE > kv = tableIterator.next(); + + // We should build cache after OM restart when clean up policy is NEVER. + // Setting epoch value -1, so that when it is marked for delete, this + // will be considered for cleanup. + cache.put(new CacheKey<>(kv.getKey()), + new CacheValue<>(Optional.of(kv.getValue()), EPOCH_DEFAULT)); + } } } } From 23c3d2cdee3daee2cb4e541c986f15fbe5996ff8 Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Tue, 16 Jul 2019 22:30:33 -0700 Subject: [PATCH 6/9] minor change. --- .../main/java/org/apache/hadoop/utils/db/TypedTable.java | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java index 5ca4251ae9df6..53097cb971fc9 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java @@ -68,12 +68,9 @@ public class TypedTable implements Table { public TypedTable( Table rawTable, CodecRegistry codecRegistry, Class keyType, - Class valueType) { - this.rawTable = rawTable; - this.codecRegistry = codecRegistry; - this.keyType = keyType; - this.valueType = valueType; - cache = new TableCacheImpl<>(TableCacheImpl.CacheCleanupPolicy.MANUAL); + Class valueType) throws IOException { + this(rawTable, codecRegistry, keyType, valueType, + CacheCleanupPolicy.MANUAL); } /** From afd38e2b8570ef7285b2f05c847960093dbc3ccf Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Tue, 16 Jul 2019 22:40:40 -0700 Subject: [PATCH 7/9] javadoc change. --- .../src/main/java/org/apache/hadoop/utils/db/DBStore.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java index 95e57d9d7216d..59004c6e6e445 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java @@ -49,8 +49,8 @@ public interface DBStore extends AutoCloseable { /** * Gets an existing TableStore with implicit key/value conversion and - * with default cleanup policy for cache. 
Default policy is cache cleanup - * after flush to DB is completed. + * with default cleanup policy for cache. Default cache clean up policy is + * manual. * * @param name - Name of the TableStore to get * @param keyType From 8b368e3ac480095e51534166831fbfe0240dd210 Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Tue, 16 Jul 2019 22:46:09 -0700 Subject: [PATCH 8/9] fix checkstyle. --- .../main/java/org/apache/hadoop/utils/db/TypedTable.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java index 53097cb971fc9..be13f6d1bf356 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java @@ -101,9 +101,9 @@ public TypedTable( while (tableIterator.hasNext()) { KeyValue< KEY, VALUE > kv = tableIterator.next(); - // We should build cache after OM restart when clean up policy is NEVER. - // Setting epoch value -1, so that when it is marked for delete, this - // will be considered for cleanup. + // We should build cache after OM restart when clean up policy is + // NEVER. Setting epoch value -1, so that when it is marked for + // delete, this will be considered for cleanup. cache.put(new CacheKey<>(kv.getKey()), new CacheValue<>(Optional.of(kv.getValue()), EPOCH_DEFAULT)); } From b20c04b60df8b88ad7fd07cd7d51ad1a4b398ef2 Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Wed, 17 Jul 2019 16:19:23 -0700 Subject: [PATCH 9/9] fix review comments. --- .../main/java/org/apache/hadoop/utils/db/TypedTable.java | 4 ++-- .../java/org/apache/hadoop/utils/db/cache/CacheResult.java | 2 +- .../java/org/apache/hadoop/utils/db/cache/TableCache.java | 4 ++-- .../org/apache/hadoop/utils/db/cache/TableCacheImpl.java | 6 +++--- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java index be13f6d1bf356..e8f9e0a8b306b 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java @@ -135,7 +135,7 @@ public boolean isEmpty() throws IOException { public boolean isExist(KEY key) throws IOException { CacheResult> cacheResult = - cache.isExist(new CacheKey<>(key)); + cache.lookup(new CacheKey<>(key)); if (cacheResult.getCacheStatus() == EXISTS) { return true; @@ -164,7 +164,7 @@ public VALUE get(KEY key) throws IOException { // key during get key. CacheResult> cacheResult = - cache.isExist(new CacheKey<>(key)); + cache.lookup(new CacheKey<>(key)); if (cacheResult.getCacheStatus() == EXISTS) { return cacheResult.getValue().getCacheValue(); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/CacheResult.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/CacheResult.java index 1c54f11a3bf6c..76b8381d7bbb3 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/CacheResult.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/CacheResult.java @@ -51,7 +51,7 @@ public enum CacheStatus { NOT_EXIST, // We guarantee that it does not exist. This will be returned // when the key does not exist in cache, when cache clean up policy is // NEVER. 
- CHECK_IN_TABLE // This will be returned when the key does not exist in + MAY_EXIST // This will be returned when the key does not exist in // cache, when cache clean up policy is MANUAL. So caller need to check // if it might exist in it's rocksdb table. } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCache.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCache.java index ed37d411303a5..0d191481992f6 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCache.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCache.java @@ -88,10 +88,10 @@ public interface TableCache isExist(CACHEKEY cachekey); + CacheResult lookup(CACHEKEY cachekey); } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCacheImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCacheImpl.java index f61bb0d81f68a..7a9d55b18a9ee 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCacheImpl.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCacheImpl.java @@ -119,7 +119,7 @@ private void evictCache(long epoch, CacheCleanupPolicy cacheCleanupPolicy) { } } - public CacheResult isExist(CACHEKEY cachekey) { + public CacheResult lookup(CACHEKEY cachekey) { // TODO: Remove this check once HA and Non-HA code is merged and all // requests are converted to use cache and double buffer. @@ -129,7 +129,7 @@ public CacheResult isExist(CACHEKEY cachekey) { // double buffer and cache. if (cache.size() == 0) { - return new CacheResult<>(CacheResult.CacheStatus.CHECK_IN_TABLE, + return new CacheResult<>(CacheResult.CacheStatus.MAY_EXIST, null); } @@ -138,7 +138,7 @@ public CacheResult isExist(CACHEKEY cachekey) { if (cleanupPolicy == CacheCleanupPolicy.NEVER) { return new CacheResult<>(CacheResult.CacheStatus.NOT_EXIST, null); } else { - return new CacheResult<>(CacheResult.CacheStatus.CHECK_IN_TABLE, + return new CacheResult<>(CacheResult.CacheStatus.MAY_EXIST, null); } } else {
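
A minimal end-to-end sketch of the final API in this series (illustrative
only: the String keys/values and epoch numbers are made up, and Optional
is Guava's com.google.common.base.Optional, as in the existing tests):

    TableCache<CacheKey<String>, CacheValue<String>> cache =
        new TableCacheImpl<>(TableCacheImpl.CacheCleanupPolicy.MANUAL);

    // Each write is tagged with the epoch (transaction index) that
    // produced it; an absent Optional marks the key as deleted.
    cache.put(new CacheKey<>("/vol1/bucket1"),
        new CacheValue<>(Optional.of("bucketInfo"), 1));
    cache.put(new CacheKey<>("/vol1/bucket2"),
        new CacheValue<>(Optional.absent(), 2));

    // MAY_EXIST: a MANUAL (partial) cache cannot prove absence, so the
    // caller must fall back to the RocksDB table for this key.
    CacheResult<CacheValue<String>> result =
        cache.lookup(new CacheKey<>("/vol1/bucket3"));

    // After the double buffer has flushed everything up to epoch 2, the
    // caller evicts the flushed entries. (Under NEVER, only entries
    // marked as deleted are evicted, so the full cache stays intact.)
    cache.cleanup(2);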