Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -16,10 +16,12 @@
*/
package org.apache.hadoop.hbase.regionserver;

import org.apache.hadoop.hbase.metrics.BaseSource;

/**
* Latency metrics for a specific table in a RegionServer.
*/
public interface MetricsTableLatencies {
public interface MetricsTableLatencies extends BaseSource {

/**
* The name of the metrics
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
package org.apache.hadoop.hbase.regionserver;

/**
 * Tracks read and write queries-per-second (QPS) meters for individual tables
 * in a RegionServer. Implementations are expected to create per-table meters
 * lazily on first update.
 */
public interface MetricsTableQPS {

  /** Metric name suffix for a table's read QPS meter. */
  String TABLE_READ_QPS = "tableReadQPS";
  /** Metric name suffix for a table's write QPS meter. */
  String TABLE_WRITE_QPS = "tableWriteQPS";

  /**
   * Update table read QPS by {@code count} occurrences.
   * @param tableName The table the metric is for
   * @param count Number of occurrences to record
   */
  void updateTableReadQPS(String tableName, long count);

  /**
   * Update table read QPS by a single occurrence.
   * @param tableName The table the metric is for
   */
  void updateTableReadQPS(String tableName);

  /**
   * Update table write QPS by {@code count} occurrences.
   * @param tableName The table the metric is for
   * @param count Number of occurrences to record
   */
  void updateTableWriteQPS(String tableName, long count);

  /**
   * Update table write QPS by a single occurrence.
   * @param tableName The table the metric is for
   */
  void updateTableWriteQPS(String tableName);
}
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,8 @@
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.MetricHistogram;
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry;

import com.google.common.annotations.VisibleForTesting;
Expand Down Expand Up @@ -172,4 +174,14 @@ public void updateScanSize(String tableName, long scanSize) {
/**
 * Records the latency of a scan operation against the given table.
 * Creates the table's histogram set on first use.
 * @param tableName table the scan ran against
 * @param t scan time (unit is whatever the caller supplies — presumably
 *          milliseconds, consistent with the other latency updates; TODO confirm)
 */
public void updateScanTime(String tableName, long t) {
  getOrCreateTableHistogram(tableName).updateScanTime(t);
}

/**
 * Snapshots every metric owned by this source into the supplied collector.
 * @param metricsCollector sink receiving the snapshot record
 * @param all whether to emit all metrics or only those changed since the last snapshot
 */
@Override public void getMetrics(MetricsCollector metricsCollector, boolean all) {
  MetricsRecordBuilder mrb = metricsCollector.addRecord(metricsName);
  // This source is registered in the super constructor, so getMetrics() can be
  // invoked before this subclass finishes initializing; hence the null guard on
  // metricsAdapter below.
  metricsRegistry.snapshot(mrb, all);
  if (metricsAdapter != null) {
    // Also snapshot the hbase-metrics MetricRegistry bridged through the adapter.
    metricsAdapter.snapshotAllMetrics(registry, mrb);
  }
}
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,81 @@
package org.apache.hadoop.hbase.regionserver;

import java.util.HashMap;
import java.util.concurrent.ConcurrentHashMap;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.metrics.Meter;
import org.apache.hadoop.hbase.metrics.MetricRegistry;

@InterfaceAudience.Private
public class MetricsTableQPSImpl implements MetricsTableQPS {

  // Per-table meters keyed by table name. RegionServer handler threads update
  // metrics concurrently, so this map must be thread-safe; a plain HashMap here
  // risks lost entries or a corrupted table under concurrent put/resize.
  private final ConcurrentHashMap<TableName, TableMeters> metersByTable =
      new ConcurrentHashMap<TableName, TableMeters>();
  private final MetricRegistry metricRegistry;

  public MetricsTableQPSImpl(MetricRegistry metricRegistry) {
    this.metricRegistry = metricRegistry;
  }

  /** Holds the read and write QPS meters for a single table. */
  public static class TableMeters {
    final Meter tableReadQPSMeter;
    final Meter tableWriteQPSMeter;

    TableMeters(MetricRegistry metricRegistry, TableName tableName) {
      this.tableReadQPSMeter = metricRegistry.meter(qualifyMetricsName(tableName, TABLE_READ_QPS));
      this.tableWriteQPSMeter =
          metricRegistry.meter(qualifyMetricsName(tableName, TABLE_WRITE_QPS));
    }

    public void updateTableReadQPS(long count) {
      tableReadQPSMeter.mark(count);
    }

    public void updateTableReadQPS() {
      tableReadQPSMeter.mark();
    }

    public void updateTableWriteQPS(long count) {
      tableWriteQPSMeter.mark(count);
    }

    public void updateTableWriteQPS() {
      tableWriteQPSMeter.mark();
    }
  }

  /**
   * Builds the fully-qualified metric name, e.g.
   * {@code Namespace_default_table_foo_metric_tableReadQPS}.
   */
  private static String qualifyMetricsName(TableName tableName, String metric) {
    StringBuilder sb = new StringBuilder();
    sb.append("Namespace_").append(tableName.getNamespaceAsString());
    sb.append("_table_").append(tableName.getQualifierAsString());
    sb.append("_metric_").append(metric);
    return sb.toString();
  }

  /**
   * Returns the meters for the given table, creating and registering them on
   * first use. Thread-safe: concurrent callers may race to construct a
   * TableMeters, but putIfAbsent guarantees exactly one instance is retained.
   * NOTE(review): assumes MetricRegistry.meter(name) is get-or-create, so a
   * losing racer's construction is harmless — confirm against the registry impl.
   */
  private TableMeters getOrCreateTableMeter(String tableName) {
    final TableName tn = TableName.valueOf(tableName);
    TableMeters meters = metersByTable.get(tn);
    if (meters == null) {
      TableMeters created = new TableMeters(metricRegistry, tn);
      TableMeters existing = metersByTable.putIfAbsent(tn, created);
      meters = (existing == null) ? created : existing;
    }
    return meters;
  }

  @Override
  public void updateTableReadQPS(String tableName, long count) {
    getOrCreateTableMeter(tableName).updateTableReadQPS(count);
  }

  @Override
  public void updateTableReadQPS(String tableName) {
    getOrCreateTableMeter(tableName).updateTableReadQPS();
  }

  @Override
  public void updateTableWriteQPS(String tableName, long count) {
    getOrCreateTableMeter(tableName).updateTableWriteQPS(count);
  }

  @Override
  public void updateTableWriteQPS(String tableName) {
    getOrCreateTableMeter(tableName).updateTableWriteQPS();
  }
}
Original file line number Diff line number Diff line change
Expand Up @@ -3149,6 +3149,10 @@ OperationStatus[] batchMutate(BatchOperationInProgress<?> batchOp) throws IOExce

if (!initialized) {
this.writeRequestsCount.add(batchOp.operations.length);
if (rsServices instanceof HRegionServer) {
((HRegionServer) rsServices).getRegionServerMetrics().
updateServerWriteQPS(this.htableDescriptor.getTableName(), batchOp.operations.length);
}
if (!batchOp.isInReplay()) {
doPreMutationHook(batchOp);
}
Expand Down Expand Up @@ -5851,7 +5855,10 @@ public boolean bulkLoadHFiles(Collection<Pair<byte[], String>> familyPaths, bool
boolean isSuccessful = false;
try {
this.writeRequestsCount.increment();

if (rsServices instanceof HRegionServer) {
((HRegionServer) rsServices).getRegionServerMetrics().
updateServerWriteQPS(this.htableDescriptor.getTableName());
}
// There possibly was a split that happened between when the split keys
// were gathered and before the HRegion's write lock was taken. We need
// to validate the HFile region before attempting to bulk load all of them
Expand Down Expand Up @@ -6246,6 +6253,10 @@ public boolean nextRaw(List<Cell> outResults, ScannerContext scannerContext)
// scanner is closed
throw new UnknownScannerException("Scanner was closed");
}
if (rsServices instanceof HRegionServer) {
((HRegionServer) rsServices).getRegionServerMetrics().
updateServerReadQPS(getRegionInfo().getTable());
}
boolean moreValues = false;
if (outResults.isEmpty()) {
// Usually outResults is empty. This is true when next is called
Expand Down Expand Up @@ -7590,6 +7601,10 @@ public void processRowsWithLocks(RowProcessor<?,?> processor, long timeout,

if (!mutations.isEmpty()) {
writeRequestsCount.add(mutations.size());
if (rsServices instanceof HRegionServer) {
((HRegionServer) rsServices).getRegionServerMetrics().
updateServerWriteQPS(this.htableDescriptor.getTableName(), mutations.size());
}
// 5. Call the preBatchMutate hook
processor.preBatchMutate(this, walEdit);

Expand Down Expand Up @@ -7792,6 +7807,10 @@ public Result append(Append mutate, long nonceGroup, long nonce) throws IOExcept
// Lock row
startRegionOperation(op);
this.writeRequestsCount.increment();
if (rsServices instanceof HRegionServer) {
((HRegionServer) rsServices).getRegionServerMetrics().
updateServerWriteQPS(this.htableDescriptor.getTableName());
}
RowLock rowLock = null;
WALKey walKey = null;
boolean doRollBackMemstore = false;
Expand Down Expand Up @@ -8061,6 +8080,10 @@ public Result increment(Increment mutation, long nonceGroup, long nonce)
checkFamilies(mutation.getFamilyCellMap().keySet());
startRegionOperation(op);
this.writeRequestsCount.increment();
if (rsServices instanceof HRegionServer) {
((HRegionServer) rsServices).getRegionServerMetrics().
updateServerWriteQPS(this.htableDescriptor.getTableName());
}
try {
// Which Increment is it? Narrow increment-only consistency or slow (default) and general
// row-wide consistency.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.metrics.Meter;
import org.apache.hadoop.hbase.metrics.MetricRegistries;
import org.apache.hadoop.hbase.metrics.MetricRegistry;
import org.apache.hadoop.hbase.metrics.Timer;
Expand Down Expand Up @@ -49,6 +50,8 @@ public class MetricsRegionServer {

private MetricRegistry metricRegistry;
private Timer bulkLoadTimer;
private Meter serverReadQPS;
private Meter serverWriteQPS;

public MetricsRegionServer(MetricsRegionServerWrapper regionServerWrapper, Configuration conf) {
this(regionServerWrapper,
Expand All @@ -62,6 +65,8 @@ public MetricsRegionServer(MetricsRegionServerWrapper regionServerWrapper, Confi

// create and use metrics from the new hbase-metrics based registry.
bulkLoadTimer = metricRegistry.timer("Bulkload");
serverReadQPS = metricRegistry.meter("ServerReadQPS");
serverWriteQPS = metricRegistry.meter("ServerWriteQPS");
}

MetricsRegionServer(MetricsRegionServerWrapper regionServerWrapper,
Expand Down Expand Up @@ -211,4 +216,32 @@ public void updateCompaction(boolean isMajor, long t, int inputFileCount, int ou
/**
 * Records the total duration of a bulk load operation.
 * @param millis bulk load duration in milliseconds
 */
public void updateBulkLoad(long millis) {
  this.bulkLoadTimer.updateMillis(millis);
}

/**
 * Marks {@code count} reads on the server-wide read QPS meter and, when
 * per-table metrics are available, on the table's read QPS meter too.
 * @param tn table the reads were served from; may be null (server-wide only)
 * @param count number of read operations to record
 */
public void updateServerReadQPS(TableName tn, long count) {
  if (tn != null && tableMetrics != null) {
    tableMetrics.updateTableReadQPS(tn, count);
  }
  serverReadQPS.mark(count);
}

/**
 * Marks a single read on the server-wide read QPS meter and, when per-table
 * metrics are available, on the table's read QPS meter too.
 * @param tn table the read was served from; may be null (server-wide only)
 */
public void updateServerReadQPS(TableName tn) {
  if (tn != null && tableMetrics != null) {
    tableMetrics.updateTableReadQPS(tn);
  }
  serverReadQPS.mark();
}

/**
 * Marks {@code count} writes on the server-wide write QPS meter and, when
 * per-table metrics are available, on the table's write QPS meter too.
 * @param tn table the writes targeted; may be null (server-wide only)
 * @param count number of write operations to record
 */
public void updateServerWriteQPS(TableName tn, long count) {
  if (tn != null && tableMetrics != null) {
    tableMetrics.updateTableWriteQPS(tn, count);
  }
  serverWriteQPS.mark(count);
}

/**
 * Marks a single write on the server-wide write QPS meter and, when per-table
 * metrics are available, on the table's write QPS meter too.
 * @param tn table the write targeted; may be null (server-wide only)
 */
public void updateServerWriteQPS(TableName tn) {
  if (tn != null && tableMetrics != null) {
    tableMetrics.updateTableWriteQPS(tn);
  }
  serverWriteQPS.mark();
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,8 @@
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.metrics.MetricRegistries;
import org.apache.hadoop.hbase.metrics.MetricRegistry;

/**
* Captures operation metrics by table. Separates metrics collection for table metrics away from
Expand All @@ -28,9 +30,13 @@
public class RegionServerTableMetrics {

private final MetricsTableLatencies latencies;
private MetricRegistry metricRegistry;
MetricsTableQPS qps;

public RegionServerTableMetrics() {
  latencies = CompatibilitySingletonFactory.getInstance(MetricsTableLatencies.class);
  // Reuse the MetricRegistry that the latencies source registered in the
  // global registry, so QPS meters land in the same metrics record.
  // NOTE(review): Optional.get() without an isPresent() check — throws
  // NoSuchElementException if the registry was not registered by the singleton
  // above; confirm the registration ordering guarantees this cannot happen.
  metricRegistry = MetricRegistries.global().get(latencies.getMetricRegistryInfo()).get();
  qps = new MetricsTableQPSImpl(metricRegistry);
}

public void updatePut(TableName table, long time) {
Expand Down Expand Up @@ -68,4 +74,20 @@ public void updateScanTime(TableName table, long time) {
/**
 * Records the size of data returned by a scan against the given table.
 * @param table table the scan ran against
 * @param size scan result size
 */
public void updateScanSize(TableName table, long size) {
  final String name = table.getNameAsString();
  latencies.updateScanSize(name, size);
}

/**
 * Records {@code count} read operations against the given table's QPS meter.
 * @param table table the reads were served from
 * @param count number of reads to record
 */
public void updateTableReadQPS(TableName table, long count) {
  final String name = table.getNameAsString();
  qps.updateTableReadQPS(name, count);
}

/**
 * Records a single read operation against the given table's QPS meter.
 * @param table table the read was served from
 */
public void updateTableReadQPS(TableName table) {
  final String name = table.getNameAsString();
  qps.updateTableReadQPS(name);
}

/**
 * Records {@code count} write operations against the given table's QPS meter.
 * @param table table the writes targeted
 * @param count number of writes to record
 */
public void updateTableWriteQPS(TableName table, long count) {
  final String name = table.getNameAsString();
  qps.updateTableWriteQPS(name, count);
}

/**
 * Records a single write operation against the given table's QPS meter.
 * @param table table the write targeted
 */
public void updateTableWriteQPS(TableName table) {
  final String name = table.getNameAsString();
  qps.updateTableWriteQPS(name);
}
}