
Commit 0d1d7c8

bharatviswa504 authored and arp7 committed
HDDS-1499. OzoneManager Cache. (#798)
1 parent a36274d · commit 0d1d7c8

File tree

14 files changed: +709 -11 lines

hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java

Lines changed: 1 addition & 0 deletions
@@ -44,6 +44,7 @@ public interface DBStore extends AutoCloseable {
    */
   Table<byte[], byte[]> getTable(String name) throws IOException;

+
   /**
    * Gets an existing TableStore with implicit key/value conversion.
    *

hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBTable.java

Lines changed: 7 additions & 3 deletions
@@ -22,6 +22,7 @@
 import java.io.IOException;
 import java.nio.charset.StandardCharsets;

+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.DFSUtil;

 import org.rocksdb.ColumnFamilyHandle;
@@ -33,9 +34,12 @@
 import org.slf4j.LoggerFactory;

 /**
- * RocksDB implementation of ozone metadata store.
+ * RocksDB implementation of ozone metadata store. This class should only be
+ * used as part of TypedTable, as its underlying implementation for accessing
+ * the metadata store content. All other users of Table should use TypedTable.
  */
-public class RDBTable implements Table<byte[], byte[]> {
+@InterfaceAudience.Private
+class RDBTable implements Table<byte[], byte[]> {


   private static final Logger LOG =
@@ -52,7 +56,7 @@ public class RDBTable implements Table<byte[], byte[]> {
    * @param handle - ColumnFamily Handle.
    * @param writeOptions - RocksDB write Options.
    */
-  public RDBTable(RocksDB db, ColumnFamilyHandle handle,
+  RDBTable(RocksDB db, ColumnFamilyHandle handle,
       WriteOptions writeOptions) {
     this.db = db;
     this.handle = handle;

hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/Table.java

Lines changed: 25 additions & 1 deletion
@@ -21,8 +21,10 @@

 import java.io.IOException;

+import org.apache.commons.lang3.NotImplementedException;
 import org.apache.hadoop.classification.InterfaceStability;
-
+import org.apache.hadoop.utils.db.cache.CacheKey;
+import org.apache.hadoop.utils.db.cache.CacheValue;
 /**
  * Interface for key-value store that stores ozone metadata. Ozone metadata is
  * stored as key value pairs, both key and value are arbitrary byte arrays. Each
@@ -97,6 +99,28 @@ void putWithBatch(BatchOperation batch, KEY key, VALUE value)
    */
   String getName() throws IOException;

+  /**
+   * Adds an entry to the table cache.
+   *
+   * If the cacheKey already exists, the entry is overridden.
+   * @param cacheKey
+   * @param cacheValue
+   */
+  default void addCacheEntry(CacheKey<KEY> cacheKey,
+      CacheValue<VALUE> cacheValue) {
+    throw new NotImplementedException("addCacheEntry is not implemented");
+  }
+
+  /**
+   * Removes all entries from the table cache whose epoch value is less than
+   * or equal to the specified epoch value.
+   * @param epoch
+   */
+  default void cleanupCache(long epoch) {
+    throw new NotImplementedException("cleanupCache is not implemented");
+  }
+
   /**
    * Class used to represent the key and value pair of a db entry.
    */
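
The two default methods above define the cache contract that Table implementations may opt into; the epoch passed to cleanupCache is the ratis transaction log index at which an entry was added. A minimal caller-side sketch (the helper class, its method names, and the String/String type parameters are illustrative assumptions, not part of this commit):

import com.google.common.base.Optional;
import org.apache.hadoop.utils.db.Table;
import org.apache.hadoop.utils.db.cache.CacheKey;
import org.apache.hadoop.utils.db.cache.CacheValue;

final class TableCacheUsage {

  /** Apply a write to the table cache, tagged with the ratis log index (epoch). */
  static void applyToCache(Table<String, String> table, String key,
      String value, long trxLogIndex) {
    table.addCacheEntry(new CacheKey<>(key),
        new CacheValue<>(Optional.of(value), trxLogIndex));
  }

  /** Evict cached entries once everything up to lastFlushedIndex is on disk. */
  static void afterFlush(Table<String, String> table, long lastFlushedIndex) {
    table.cleanupCache(lastFlushedIndex);
  }
}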

hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java

Lines changed: 74 additions & 4 deletions
@@ -20,6 +20,12 @@

 import java.io.IOException;

+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.utils.db.cache.CacheKey;
+import org.apache.hadoop.utils.db.cache.CacheValue;
+import org.apache.hadoop.utils.db.cache.PartialTableCache;
+import org.apache.hadoop.utils.db.cache.TableCache;
+
 /**
  * Strongly typed table implementation.
  * <p>
@@ -31,13 +37,16 @@
  */
 public class TypedTable<KEY, VALUE> implements Table<KEY, VALUE> {

-  private Table<byte[], byte[]> rawTable;
+  private final Table<byte[], byte[]> rawTable;
+
+  private final CodecRegistry codecRegistry;

-  private CodecRegistry codecRegistry;
+  private final Class<KEY> keyType;

-  private Class<KEY> keyType;
+  private final Class<VALUE> valueType;
+
+  private final TableCache<CacheKey<KEY>, CacheValue<VALUE>> cache;

-  private Class<VALUE> valueType;

   public TypedTable(
       Table<byte[], byte[]> rawTable,
@@ -47,6 +56,7 @@ public TypedTable(
     this.codecRegistry = codecRegistry;
     this.keyType = keyType;
     this.valueType = valueType;
+    cache = new PartialTableCache<>();
   }

   @Override
@@ -69,8 +79,34 @@ public boolean isEmpty() throws IOException {
     return rawTable.isEmpty();
   }

+  /**
+   * Returns the value mapped to the given key, or null if the key is not
+   * found.
+   *
+   * Callers of this method should use a synchronization mechanism when
+   * accessing it. The cache is checked first; if it holds an entry, that
+   * value is returned, otherwise the value is read from the RocksDB table.
+   *
+   * @param key metadata key
+   * @return VALUE
+   * @throws IOException
+   */
   @Override
   public VALUE get(KEY key) throws IOException {
+    // Here the metadata lock will guarantee that the cache is not updated for
+    // the same key during this get.
+    CacheValue<VALUE> cacheValue = cache.get(new CacheKey<>(key));
+    if (cacheValue == null) {
+      // If the table has no cache, or the key does not exist in the cache,
+      // get it from the RocksDB table.
+      return getFromTable(key);
+    } else {
+      // We have a value in the cache, return it.
+      return cacheValue.getValue();
+    }
+  }
+
+  private VALUE getFromTable(KEY key) throws IOException {
     byte[] keyBytes = codecRegistry.asRawData(key);
     byte[] valueBytes = rawTable.get(keyBytes);
     return codecRegistry.asObject(valueBytes, valueType);
@@ -106,6 +142,40 @@ public void close() throws Exception {

   }

+  @Override
+  public void addCacheEntry(CacheKey<KEY> cacheKey,
+      CacheValue<VALUE> cacheValue) {
+    // This will override the entry if there is already an entry for this key.
+    cache.put(cacheKey, cacheValue);
+  }
+
+
+  @Override
+  public void cleanupCache(long epoch) {
+    cache.cleanup(epoch);
+  }
+
+  @VisibleForTesting
+  TableCache<CacheKey<KEY>, CacheValue<VALUE>> getCache() {
+    return cache;
+  }
+
+  public Table<byte[], byte[]> getRawTable() {
+    return rawTable;
+  }
+
+  public CodecRegistry getCodecRegistry() {
+    return codecRegistry;
+  }
+
+  public Class<KEY> getKeyType() {
+    return keyType;
+  }
+
+  public Class<VALUE> getValueType() {
+    return valueType;
+  }
+
   /**
    * Key value implementation for strongly typed tables.
    */
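
A short illustration of the read-through behaviour of get() above, and of how a cached delete (a CacheValue holding an absent Optional) hides a key until cleanupCache evicts the entry. This is a hypothetical sketch: it assumes an already constructed TypedTable<String, String> (passed in through its Table interface), and the keys, values and epochs are made up.

import java.io.IOException;

import com.google.common.base.Optional;
import org.apache.hadoop.utils.db.Table;
import org.apache.hadoop.utils.db.cache.CacheKey;
import org.apache.hadoop.utils.db.cache.CacheValue;

final class TypedTableCacheExample {

  static void illustrate(Table<String, String> table) throws IOException {
    // A put applied only to the cache, tagged with ratis log index 1.
    table.addCacheEntry(new CacheKey<>("key1"),
        new CacheValue<>(Optional.of("value1"), 1L));
    table.get("key1");   // "value1", served from the cache before any DB flush

    // A delete applied only to the cache: the absent value makes get() return
    // null even if the RocksDB table still holds an old row for "key2".
    table.addCacheEntry(new CacheKey<>("key2"),
        new CacheValue<>(Optional.<String>absent(), 2L));
    table.get("key2");   // null

    // Once entries up to epoch 2 have been flushed, evict them from the cache;
    // reads for these keys fall back to the RocksDB table again.
    table.cleanupCache(2L);
    table.get("key1");
  }
}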
hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/CacheKey.java (new file)

Lines changed: 56 additions & 0 deletions

@@ -0,0 +1,56 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.utils.db.cache;

import java.util.Objects;

/**
 * CacheKey for the RocksDB table.
 * @param <KEY>
 */
public class CacheKey<KEY> {

  private final KEY key;

  public CacheKey(KEY key) {
    Objects.requireNonNull(key, "Key should not be null in CacheKey");
    this.key = key;
  }

  public KEY getKey() {
    return key;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }
    CacheKey<?> cacheKey = (CacheKey<?>) o;
    return Objects.equals(key, cacheKey.key);
  }

  @Override
  public int hashCode() {
    return Objects.hash(key);
  }
}
hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/CacheValue.java (new file)

Lines changed: 47 additions & 0 deletions

@@ -0,0 +1,47 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.utils.db.cache;

import com.google.common.base.Optional;

/**
 * CacheValue for the RocksDB Table.
 * @param <VALUE>
 */
public class CacheValue<VALUE> {

  private Optional<VALUE> value;
  // This value is used to evict entries from the cache.
  // It is set to the ratis transaction context log entry index.
  private long epoch;

  public CacheValue(Optional<VALUE> value, long epoch) {
    this.value = value;
    this.epoch = epoch;
  }

  public VALUE getValue() {
    return value.orNull();
  }

  public long getEpoch() {
    return epoch;
  }

}
hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/EpochEntry.java (new file)

Lines changed: 74 additions & 0 deletions

@@ -0,0 +1,74 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package org.apache.hadoop.utils.db.cache;

import java.util.Objects;

/**
 * Class that describes an epoch entry. This is used when deleting entries
 * from the partial table cache.
 * @param <CACHEKEY>
 */
public class EpochEntry<CACHEKEY> implements Comparable<CACHEKEY> {

  private long epoch;
  private CACHEKEY cachekey;

  EpochEntry(long epoch, CACHEKEY cachekey) {
    this.epoch = epoch;
    this.cachekey = cachekey;
  }

  public long getEpoch() {
    return epoch;
  }

  public CACHEKEY getCachekey() {
    return cachekey;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }
    EpochEntry<?> that = (EpochEntry<?>) o;
    return epoch == that.epoch && cachekey == that.cachekey;
  }

  @Override
  public int hashCode() {
    return Objects.hash(epoch, cachekey);
  }

  public int compareTo(Object o) {
    if (this.epoch == ((EpochEntry<?>) o).epoch) {
      return 0;
    } else if (this.epoch < ((EpochEntry<?>) o).epoch) {
      return -1;
    } else {
      return 1;
    }
  }

}
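
PartialTableCache (the TableCache implementation that TypedTable instantiates) is not included in this excerpt. As a rough sketch of how epoch-ordered eviction can work with entries like EpochEntry, the hypothetical class below keeps a lookup map plus a FIFO queue of (epoch, key) pairs and, on cleanup, drops everything recorded at or below the given epoch; all names and structure here are assumptions, not code from the commit.

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashMap;
import java.util.Map;

// Hypothetical, simplified epoch-based cache sketch.
final class EpochOrderedCache<K, V> {

  private static final class ValueEntry<V> {
    private final V value;
    private final long epoch;

    ValueEntry(V value, long epoch) {
      this.value = value;
      this.epoch = epoch;
    }
  }

  private static final class EpochKey<K> {
    private final long epoch;
    private final K key;

    EpochKey(long epoch, K key) {
      this.epoch = epoch;
      this.key = key;
    }
  }

  private final Map<K, ValueEntry<V>> cache = new HashMap<>();
  // Epochs are ratis log indexes, so entries arrive in increasing epoch order
  // and a FIFO queue keeps the eviction candidates sorted by epoch.
  private final Deque<EpochKey<K>> epochOrder = new ArrayDeque<>();

  void put(K key, V value, long epoch) {
    cache.put(key, new ValueEntry<>(value, epoch));
    epochOrder.addLast(new EpochKey<>(epoch, key));
  }

  V get(K key) {
    ValueEntry<V> entry = cache.get(key);
    return entry == null ? null : entry.value;
  }

  /** Evict every entry that was cached at or before the given epoch. */
  void cleanup(long epoch) {
    while (!epochOrder.isEmpty() && epochOrder.peekFirst().epoch <= epoch) {
      EpochKey<K> oldest = epochOrder.pollFirst();
      ValueEntry<V> current = cache.get(oldest.key);
      // Only drop the key if it was not re-added at a later epoch.
      if (current != null && current.epoch <= epoch) {
        cache.remove(oldest.key);
      }
    }
  }
}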
