@@ -316,4 +316,11 @@ default boolean matchReplicationScope(boolean enabled) {
}
return !enabled;
}

/**
 * Checks whether row caching is enabled for this table. Row caching applies at the whole-row
 * level only; it cannot be configured per column family.
 * @return {@code true} if the row cache is enabled, {@code false} otherwise
 */
boolean isRowCacheEnabled();
}
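A short usage sketch tying the getter above to the builder method added further down in this change; the table name is illustrative and the surrounding setup (connection, admin) is omitted:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

// Illustrative only: build a descriptor with the row cache flag set and read it back.
public class RowCacheDescriptorExample {
  public static void main(String[] args) {
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf("example_table"))
      .setRowCacheEnabled(true) // stored as the ROW_CACHE_ENABLED table attribute
      .build();
    // Falls back to the default (false) when the attribute is absent.
    System.out.println("row cache enabled: " + desc.isRowCacheEnabled());
  }
}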
@@ -227,6 +227,15 @@ public class TableDescriptorBuilder {
private final static Map<String, String> DEFAULT_VALUES = new HashMap<>();
private final static Set<Bytes> RESERVED_KEYWORDS = new HashSet<>();

/**
 * Used by the HBase Shell to access this metadata attribute, which denotes whether the row
 * cache is enabled.
 */
@InterfaceAudience.Private
public static final String ROW_CACHE_ENABLED = "ROW_CACHE_ENABLED";
private static final Bytes ROW_CACHE_ENABLED_KEY = new Bytes(Bytes.toBytes(ROW_CACHE_ENABLED));
private static final boolean DEFAULT_ROW_CACHE_ENABLED = false;

static {
DEFAULT_VALUES.put(MAX_FILESIZE, String.valueOf(HConstants.DEFAULT_MAX_FILE_SIZE));
DEFAULT_VALUES.put(READONLY, String.valueOf(DEFAULT_READONLY));
@@ -236,6 +245,7 @@ public class TableDescriptorBuilder {
DEFAULT_VALUES.put(PRIORITY, String.valueOf(DEFAULT_PRIORITY));
// Setting ERASURE_CODING_POLICY to NULL so that it is not considered as metadata
DEFAULT_VALUES.put(ERASURE_CODING_POLICY, String.valueOf(DEFAULT_ERASURE_CODING_POLICY));
DEFAULT_VALUES.put(ROW_CACHE_ENABLED, String.valueOf(DEFAULT_ROW_CACHE_ENABLED));
DEFAULT_VALUES.keySet().stream().map(s -> new Bytes(Bytes.toBytes(s)))
.forEach(RESERVED_KEYWORDS::add);
RESERVED_KEYWORDS.add(IS_META_KEY);
@@ -565,6 +575,11 @@ public TableDescriptor build() {
return new ModifyableTableDescriptor(desc);
}

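/**
 * Sets whether the row cache is enabled for this table.
 * @param rowCacheEnabled {@code true} to enable the row cache
 * @return this builder instance
 */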
public TableDescriptorBuilder setRowCacheEnabled(boolean rowCacheEnabled) {
desc.setRowCacheEnabled(rowCacheEnabled);
return this;
}

private static final class ModifyableTableDescriptor
implements TableDescriptor, Comparable<ModifyableTableDescriptor> {

@@ -1510,6 +1525,15 @@ public Optional<String> getRegionServerGroup() {
return Optional.empty();
}
}

@Override
public boolean isRowCacheEnabled() {
return getOrDefault(ROW_CACHE_ENABLED_KEY, Boolean::valueOf, DEFAULT_ROW_CACHE_ENABLED);
}

private ModifyableTableDescriptor setRowCacheEnabled(boolean enabled) {
return setValue(ROW_CACHE_ENABLED_KEY, Boolean.toString(enabled));
}
}

/**
@@ -1017,6 +1017,12 @@ public enum OperationStatusCode {

public static final float HFILE_BLOCK_CACHE_SIZE_DEFAULT = 0.4f;

/**
 * Configuration key for the row cache size, expressed as a fraction of the maximum heap
 */
public static final String ROW_CACHE_SIZE_KEY = "row.cache.size";
public static final float ROW_CACHE_SIZE_DEFAULT = 0.0f;

/**
* Configuration key for the memory size of the block cache
*/
@@ -93,25 +93,29 @@ public static void validateRegionServerHeapMemoryAllocation(Configuration conf)
}
float memStoreFraction = getGlobalMemStoreHeapPercent(conf, false);
float blockCacheFraction = getBlockCacheHeapPercent(conf);
float rowCacheFraction =
conf.getFloat(HConstants.ROW_CACHE_SIZE_KEY, HConstants.ROW_CACHE_SIZE_DEFAULT);
float minFreeHeapFraction = getRegionServerMinFreeHeapFraction(conf);

int memStorePercent = (int) (memStoreFraction * 100);
int blockCachePercent = (int) (blockCacheFraction * 100);
int rowCachePercent = (int) (rowCacheFraction * 100);
int minFreeHeapPercent = (int) (minFreeHeapFraction * 100);
int usedPercent = memStorePercent + blockCachePercent;
int usedPercent = memStorePercent + blockCachePercent + rowCachePercent;
int maxAllowedUsed = 100 - minFreeHeapPercent;

if (usedPercent > maxAllowedUsed) {
throw new RuntimeException(String.format(
"RegionServer heap memory allocation is invalid: total memory usage exceeds 100%% "
+ "(memStore + blockCache + requiredFreeHeap). "
+ "Check the following configuration values:%n" + " - %s = %.2f%n" + " - %s = %s%n"
+ " - %s = %s%n" + " - %s = %s",
+ "(memStore + blockCache + rowCache + requiredFreeHeap). "
+ "Check the following configuration values:" + "%n - %s = %.2f" + "%n - %s = %s"
+ "%n - %s = %s" + "%n - %s = %s" + "%n - %s = %s",
MEMSTORE_SIZE_KEY, memStoreFraction, HConstants.HFILE_BLOCK_CACHE_MEMORY_SIZE_KEY,
conf.get(HConstants.HFILE_BLOCK_CACHE_MEMORY_SIZE_KEY),
HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, conf.get(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY),
HBASE_REGION_SERVER_FREE_HEAP_MIN_MEMORY_SIZE_KEY,
conf.get(HBASE_REGION_SERVER_FREE_HEAP_MIN_MEMORY_SIZE_KEY)));
conf.get(HBASE_REGION_SERVER_FREE_HEAP_MIN_MEMORY_SIZE_KEY), HConstants.ROW_CACHE_SIZE_KEY,
conf.get(HConstants.ROW_CACHE_SIZE_KEY)));
}
}
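A worked instance of the check above; the fractions are illustrative values, not defaults, and the snippet simply restates the arithmetic of validateRegionServerHeapMemoryAllocation:

// Illustrative arithmetic only; mirrors the validation above with sample fractions.
public class HeapBudgetExample {
  public static void main(String[] args) {
    float memStoreFraction = 0.40f;   // e.g. hbase.regionserver.global.memstore.size
    float blockCacheFraction = 0.30f; // e.g. hfile.block.cache.size
    float rowCacheFraction = 0.10f;   // row.cache.size, introduced in this change
    float minFreeHeapFraction = 0.20f;

    int usedPercent = (int) (memStoreFraction * 100) + (int) (blockCacheFraction * 100)
      + (int) (rowCacheFraction * 100);
    int maxAllowedUsed = 100 - (int) (minFreeHeapFraction * 100);

    // 40 + 30 + 10 = 80 <= 80, so this configuration is accepted; raising
    // row.cache.size to 0.15 would push usedPercent to 85 and trigger the exception.
    System.out.println(usedPercent <= maxAllowedUsed ? "valid" : "invalid");
  }
}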

@@ -313,4 +317,15 @@ public static long getBucketCacheSize(final Configuration conf) {
}
return (long) (bucketCacheSize * 1024 * 1024);
}

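/**
 * Returns the row cache capacity in bytes, computed as the fraction configured by
 * {@link HConstants#ROW_CACHE_SIZE_KEY} of the maximum heap size.
 */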
public static long getRowCacheSize(Configuration conf) {
long max = -1L;
final MemoryUsage usage = safeGetHeapMemoryUsage();
if (usage != null) {
max = usage.getMax();
}
float globalRowCachePercent =
conf.getFloat(HConstants.ROW_CACHE_SIZE_KEY, HConstants.ROW_CACHE_SIZE_DEFAULT);
return ((long) (max * globalRowCachePercent));
}
}
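For a sense of scale, the capacity computation amounts to the sketch below; the 5% setting is an illustrative value, not a shipped default:

import java.lang.management.ManagementFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;

// Illustrative: compute the row cache capacity the same way getRowCacheSize does.
public class RowCacheCapacityExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setFloat(HConstants.ROW_CACHE_SIZE_KEY, 0.05f); // reserve 5% of max heap

    long maxHeap = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax();
    long rowCacheBytes = (long) (maxHeap
      * conf.getFloat(HConstants.ROW_CACHE_SIZE_KEY, HConstants.ROW_CACHE_SIZE_DEFAULT));

    // With a 32 GiB heap this works out to roughly 1.6 GiB.
    System.out.println("row cache capacity: " + rowCacheBytes + " bytes");
  }
}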
@@ -65,6 +65,7 @@
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.LongAdder;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
@@ -433,6 +434,11 @@ public MetricsTableRequests getMetricsTableRequests() {
*/
private long openSeqNum = HConstants.NO_SEQNUM;

/**
 * Initialized to the same value as openSeqNum, but also incremented when a bulk load completes,
 * so that row cache entries created before the bulk load can be invalidated.
 */
private final AtomicLong rowCacheSeqNum = new AtomicLong(HConstants.NO_SEQNUM);

/**
* The default setting for whether to enable on-demand CF loading for scan requests to this
* region. Requests can override it.
@@ -7881,6 +7887,7 @@ private HRegion openHRegion(final CancelableProgressable reporter) throws IOExce
LOG.debug("checking classloading for " + this.getRegionInfo().getEncodedName());
TableDescriptorChecker.checkClassLoading(cConfig, htableDescriptor);
this.openSeqNum = initialize(reporter);
this.rowCacheSeqNum.set(this.openSeqNum);
this.mvcc.advanceTo(openSeqNum);
// The openSeqNum must be increased every time when a region is assigned, as we rely on it to
// determine whether a region has been successfully reopened. So here we always write open
@@ -8709,6 +8716,17 @@ public long getOpenSeqNum() {
return this.openSeqNum;
}

public long getRowCacheSeqNum() {
return this.rowCacheSeqNum.get();
}

/**
 * Increments the sequence number used by the row cache so that entries cached before a bulk
 * load are invalidated for this region.
 */
public void increaseRowCacheSeqNum() {
this.rowCacheSeqNum.incrementAndGet();
}

@Override
public Map<byte[], Long> getMaxStoreSeqId() {
return this.maxSeqIdInStores;
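The row cache internals are not part of this excerpt, but the invalidation idea behind rowCacheSeqNum can be sketched as follows: if the sequence number is folded into the cache key, bumping it after a bulk load makes every previously cached row unreachable. The key class below is a hypothetical illustration, not the implementation added by this change.

import java.util.Arrays;
import java.util.Objects;

// Hypothetical sketch: a cache key that embeds the region's rowCacheSeqNum.
// After increaseRowCacheSeqNum(), new lookups use a new sequence number, so
// entries cached before the bulk load can never be returned again.
public final class ExampleRowCacheKey {
  private final String encodedRegionName;
  private final byte[] row;
  private final long rowCacheSeqNum;

  public ExampleRowCacheKey(String encodedRegionName, byte[] row, long rowCacheSeqNum) {
    this.encodedRegionName = encodedRegionName;
    this.row = row.clone();
    this.rowCacheSeqNum = rowCacheSeqNum;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (!(o instanceof ExampleRowCacheKey)) {
      return false;
    }
    ExampleRowCacheKey other = (ExampleRowCacheKey) o;
    return rowCacheSeqNum == other.rowCacheSeqNum
      && encodedRegionName.equals(other.encodedRegionName) && Arrays.equals(row, other.row);
  }

  @Override
  public int hashCode() {
    return 31 * Objects.hash(encodedRegionName, rowCacheSeqNum) + Arrays.hashCode(row);
  }
}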
@@ -354,6 +354,11 @@ public class RSRpcServices extends HBaseRpcServicesBase<HRegionServer>
public static final String REGIONSERVER_BOOTSTRAP_NODES_SERVICE_CONFIG =
"hbase.regionserver.bootstrap.nodes.executorService";

/**
* The row cache service
*/
private final RowCacheService rowCacheService = new RowCacheService(getConfiguration());

/**
* An Rpc callback for closing a RegionScanner.
*/
@@ -668,7 +673,7 @@ private CheckAndMutateResult checkAndMutate(HRegion region, List<ClientProtos.Ac
result = region.getCoprocessorHost().preCheckAndMutate(checkAndMutate);
}
if (result == null) {
result = region.checkAndMutate(checkAndMutate, nonceGroup, nonce);
result = rowCacheService.checkAndMutate(region, checkAndMutate, nonceGroup, nonce);
if (region.getCoprocessorHost() != null) {
result = region.getCoprocessorHost().postCheckAndMutate(checkAndMutate, result);
}
@@ -1020,7 +1025,8 @@ private void doBatchOp(final RegionActionResult.Builder builder, final HRegion r
Arrays.sort(mArray, (v1, v2) -> Row.COMPARATOR.compare(v1, v2));
}

OperationStatus[] codes = region.batchMutate(mArray, atomic, nonceGroup, nonce);
OperationStatus[] codes =
rowCacheService.batchMutate(region, mArray, atomic, nonceGroup, nonce);

// When atomic is true, it indicates that the mutateRow API or the batch API with
// RowMutations is called. In this case, we need to merge the results of the
@@ -2336,6 +2342,11 @@ public UpdateFavoredNodesResponse updateFavoredNodes(RpcController controller,
@Override
public BulkLoadHFileResponse bulkLoadHFile(final RpcController controller,
final BulkLoadHFileRequest request) throws ServiceException {
return rowCacheService.bulkLoadHFile(this, request);
}

BulkLoadHFileResponse bulkLoadHFileInternal(final BulkLoadHFileRequest request)
throws ServiceException {
long start = EnvironmentEdgeManager.currentTime();
List<String> clusterIds = new ArrayList<>(request.getClusterIdsList());
if (clusterIds.contains(this.server.getClusterId())) {
@@ -2592,8 +2603,7 @@ private Result get(Get get, HRegion region, RegionScannersCloseCallBack closeCal
RegionScannerImpl scanner = null;
long blockBytesScannedBefore = context.getBlockBytesScanned();
try {
scanner = region.getScanner(scan);
scanner.next(results);
scanner = rowCacheService.getScanner(region, scan, results);
} finally {
if (scanner != null) {
if (closeCallBack == null) {
@@ -3002,33 +3012,10 @@ public MutateResponse mutate(final RpcController rpcc, final MutateRequest reque
builder.setMetrics(ProtobufUtil.toQueryMetrics(result.getMetrics()));
}
} else {
Result r = null;
Boolean processed = null;
MutationType type = mutation.getMutateType();
switch (type) {
case APPEND:
// TODO: this doesn't actually check anything.
r = append(region, quota, mutation, cellScanner, nonceGroup, spaceQuotaEnforcement,
context);
break;
case INCREMENT:
// TODO: this doesn't actually check anything.
r = increment(region, quota, mutation, cellScanner, nonceGroup, spaceQuotaEnforcement,
context);
break;
case PUT:
put(region, quota, mutation, cellScanner, spaceQuotaEnforcement);
processed = Boolean.TRUE;
break;
case DELETE:
delete(region, quota, mutation, cellScanner, spaceQuotaEnforcement);
processed = Boolean.TRUE;
break;
default:
throw new DoNotRetryIOException("Unsupported mutate type: " + type.name());
}
if (processed != null) {
builder.setProcessed(processed);
Result r = rowCacheService.mutate(this, region, mutation, quota, cellScanner, nonceGroup,
spaceQuotaEnforcement, context);
if (r == Result.EMPTY_RESULT) {
builder.setProcessed(true);
}
boolean clientCellBlockSupported = isClientCellBlockSupport(context);
addResult(builder, r, controller, clientCellBlockSupported);
@@ -3047,6 +3034,29 @@ public MutateResponse mutate(final RpcController rpcc, final MutateRequest reque
}
}

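/**
 * Dispatches a single mutation to the appropriate handler. Returns the result of an append or
 * increment, or {@link Result#EMPTY_RESULT} for a put or delete.
 */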
Result mutateInternal(MutationProto mutation, HRegion region, OperationQuota quota,
CellScanner cellScanner, long nonceGroup, ActivePolicyEnforcement spaceQuotaEnforcement,
RpcCallContext context) throws IOException {
MutationType type = mutation.getMutateType();
return switch (type) {
case APPEND ->
// TODO: this doesn't actually check anything.
append(region, quota, mutation, cellScanner, nonceGroup, spaceQuotaEnforcement, context);
case INCREMENT ->
// TODO: this doesn't actually check anything.
increment(region, quota, mutation, cellScanner, nonceGroup, spaceQuotaEnforcement,
context);
case PUT -> {
put(region, quota, mutation, cellScanner, spaceQuotaEnforcement);
yield Result.EMPTY_RESULT;
}
case DELETE -> {
delete(region, quota, mutation, cellScanner, spaceQuotaEnforcement);
yield Result.EMPTY_RESULT;
}
};
}

private void put(HRegion region, OperationQuota quota, MutationProto mutation,
CellScanner cellScanner, ActivePolicyEnforcement spaceQuota) throws IOException {
long before = EnvironmentEdgeManager.currentTime();
@@ -3095,7 +3105,7 @@ private CheckAndMutateResult checkAndMutate(HRegion region, OperationQuota quota
result = region.getCoprocessorHost().preCheckAndMutate(checkAndMutate);
}
if (result == null) {
result = region.checkAndMutate(checkAndMutate, nonceGroup, nonce);
result = rowCacheService.checkAndMutate(region, checkAndMutate, nonceGroup, nonce);
if (region.getCoprocessorHost() != null) {
result = region.getCoprocessorHost().postCheckAndMutate(checkAndMutate, result);
}
@@ -4079,4 +4089,9 @@ RegionScannerContext checkQuotaAndGetRegionScannerContext(ScanRequest request,
Pair<String, RegionScannerHolder> pair = newRegionScanner(request, region, builder);
return new RegionScannerContext(pair.getFirst(), pair.getSecond(), quota);
}

// For testing only
public RowCacheService getRowCacheService() {
return rowCacheService;
}
}
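RowCacheService itself is not shown in this excerpt. Its call sites above (get, mutate, batchMutate, checkAndMutate, bulkLoadHFile) suggest a read-through cache that is invalidated on writes; a minimal sketch of that pattern, with an assumed class shape and a plain map as the backing store, follows:

import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

// Hypothetical sketch of the read-through / invalidate-on-write pattern implied by the
// call sites in RSRpcServices; not the RowCacheService implementation itself.
public class ExampleRowCacheService<K, V> {
  private final ConcurrentHashMap<K, V> cache = new ConcurrentHashMap<>();

  // Get path: serve the row from the cache, loading it from the region on a miss.
  public V get(K key, Function<K, V> loadFromRegion) {
    return cache.computeIfAbsent(key, loadFromRegion);
  }

  // Mutation paths (mutate, batchMutate, checkAndMutate): drop the affected row
  // before delegating to the region, so readers never see stale data.
  public void invalidate(K key) {
    cache.remove(key);
  }

  // Bulk load bypasses the normal write path; the real change bumps the region's
  // rowCacheSeqNum instead, which this sketch approximates by clearing everything.
  public void invalidateAll() {
    cache.clear();
  }
}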