Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -349,8 +349,8 @@ public static TableState getTableState(Result r) throws IOException {
}

/**
* @return Deserialized values of <qualifier,regioninfo> pairs taken from column values that
* match the regex 'info:merge.*' in array of <code>cells</code>.
Returns deserialized values of &lt;qualifier,regioninfo&gt; pairs taken from column values that
match the regex 'info:merge.*' in array of <code>cells</code>.
*/
@Nullable
public static Map<String, RegionInfo> getMergeRegionsWithName(Cell[] cells) {
Expand All @@ -376,8 +376,8 @@ public static Map<String, RegionInfo> getMergeRegionsWithName(Cell[] cells) {
}

/**
* @return Deserialized regioninfo values taken from column values that match the regex
* 'info:merge.*' in array of <code>cells</code>.
Returns deserialized regioninfo values taken from column values that match the regex
'info:merge.*' in array of <code>cells</code>.
*/
@Nullable
public static List<RegionInfo> getMergeRegions(Cell[] cells) {
Expand All @@ -386,8 +386,8 @@ public static List<RegionInfo> getMergeRegions(Cell[] cells) {
}

/**
* @return True if any merge regions present in <code>cells</code>; i.e. the column in
* <code>cell</code> matches the regex 'info:merge.*'.
Returns true if any merge regions present in <code>cells</code>; i.e. the column in
<code>cell</code> matches the regex 'info:merge.*'.
*/
public static boolean hasMergeRegions(Cell[] cells) {
for (Cell cell : cells) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,7 @@ private ClientMetaTableAccessor() {
}

@InterfaceAudience.Private
@SuppressWarnings("ImmutableEnumChecker")
public enum QueryType {
ALL(HConstants.TABLE_FAMILY, HConstants.CATALOG_FAMILY),
REGION(HConstants.CATALOG_FAMILY),
Expand Down Expand Up @@ -100,11 +101,7 @@ public static CompletableFuture<Optional<TableState>> getTableState(AsyncTable<?
return future;
}

/**
* Returns the HRegionLocation from meta for the given region n * @param regionName region we're
* looking for
* @return HRegionLocation for the given region
*/
/** Returns the HRegionLocation from meta for the given region */
public static CompletableFuture<Optional<HRegionLocation>>
getRegionLocation(AsyncTable<?> metaTable, byte[] regionName) {
CompletableFuture<Optional<HRegionLocation>> future = new CompletableFuture<>();
Expand All @@ -126,11 +123,7 @@ public static CompletableFuture<Optional<TableState>> getTableState(AsyncTable<?
return future;
}

/**
* Returns the HRegionLocation from meta for the given encoded region name n * @param
* encodedRegionName region we're looking for
* @return HRegionLocation for the given region
*/
/** Returns the HRegionLocation from meta for the given encoded region name */
public static CompletableFuture<Optional<HRegionLocation>>
getRegionLocationWithEncodedName(AsyncTable<?> metaTable, byte[] encodedRegionName) {
CompletableFuture<Optional<HRegionLocation>> future = new CompletableFuture<>();
Expand Down Expand Up @@ -167,8 +160,9 @@ private static Optional<TableState> getTableState(Result r) throws IOException {
}

/**
* Used to get all region locations for the specific table. n * @param tableName table we're
* looking for, can be null for getting all regions
* Used to get all region locations for the specific table
* @param metaTable scanner over meta table
* @param tableName table we're looking for, can be null for getting all regions
* @return the list of region locations. The return value will be wrapped by a
* {@link CompletableFuture}.
*/
Expand All @@ -191,8 +185,9 @@ public static CompletableFuture<List<HRegionLocation>> getTableHRegionLocations(
}

/**
* Used to get table regions' info and server. n * @param tableName table we're looking for, can
* be null for getting all regions
* Used to get table regions' info and server.
* @param metaTable scanner over meta table
* @param tableName table we're looking for, can be null for getting all regions
* @param excludeOfflinedSplitParents don't return split parents
* @return the list of regioninfos and server. The return value will be wrapped by a
* {@link CompletableFuture}.
Expand Down Expand Up @@ -221,9 +216,11 @@ private static CompletableFuture<List<Pair<RegionInfo, ServerName>>> getTableReg
}

/**
* Performs a scan of META table for given table. n * @param tableName table withing we scan
* @param type scanned part of meta
* @param visitor Visitor invoked against each row
* Performs a scan of META table for given table.
* @param metaTable scanner over meta table
* @param tableName table within we scan
* @param type scanned part of meta
* @param visitor Visitor invoked against each row
*/
private static CompletableFuture<Void> scanMeta(AsyncTable<AdvancedScanResultConsumer> metaTable,
TableName tableName, QueryType type, final Visitor visitor) {
Expand All @@ -232,11 +229,13 @@ private static CompletableFuture<Void> scanMeta(AsyncTable<AdvancedScanResultCon
}

/**
* Performs a scan of META table for given table. n * @param startRow Where to start the scan
* @param stopRow Where to stop the scan
* @param type scanned part of meta
* @param maxRows maximum rows to return
* @param visitor Visitor invoked against each row
* Performs a scan of META table for given table.
* @param metaTable scanner over meta table
* @param startRow Where to start the scan
* @param stopRow Where to stop the scan
* @param type scanned part of meta
* @param maxRows maximum rows to return
* @param visitor Visitor invoked against each row
*/
private static CompletableFuture<Void> scanMeta(AsyncTable<AdvancedScanResultConsumer> metaTable,
byte[] startRow, byte[] stopRow, QueryType type, int maxRows, final Visitor visitor) {
Expand Down Expand Up @@ -456,19 +455,12 @@ private static Scan getMetaScan(AsyncTable<?> metaTable, int rowUpperLimit) {
return scan;
}

/**
* Returns an HRegionLocationList extracted from the result.
* @return an HRegionLocationList containing all locations for the region range or null if we
* can't deserialize the result.
*/
/** Returns an HRegionLocationList extracted from the result. */
private static Optional<RegionLocations> getRegionLocations(Result r) {
return Optional.ofNullable(CatalogFamilyFormat.getRegionLocations(r));
}

/**
* @param tableName table we're working with
* @return start row for scanning META according to query type
*/
/** Returns start row for scanning META according to query type */
public static byte[] getTableStartRowForMeta(TableName tableName, QueryType type) {
if (tableName == null) {
return null;
Expand All @@ -490,10 +482,7 @@ public static byte[] getTableStartRowForMeta(TableName tableName, QueryType type
}
}

/**
* @param tableName table we're working with
* @return stop row for scanning META according to query type
*/
/** Returns stop row for scanning META according to query type */
public static byte[] getTableStopRowForMeta(TableName tableName, QueryType type) {
if (tableName == null) {
return null;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,7 @@ public byte[] toByteArray() {
}

/**
* Parse the serialized representation of the {@link ClusterId}
* @param bytes A pb serialized {@link ClusterId} instance with pb magic prefix
* @return An instance of {@link ClusterId} made from <code>bytes</code>
* @see #toByteArray()
*/
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -67,13 +67,13 @@ public static ClusterStatusProtos.ClusterStatus toClusterStatus(ClusterMetrics m
.collect(Collectors.toList()))
.addAllTableRegionStatesCount(metrics.getTableRegionStatesCount().entrySet().stream()
.map(status -> ClusterStatusProtos.TableRegionStatesCount.newBuilder()
.setTableName(ProtobufUtil.toProtoTableName((status.getKey())))
.setTableName(ProtobufUtil.toProtoTableName(status.getKey()))
.setRegionStatesCount(ProtobufUtil.toTableRegionStatesCount(status.getValue())).build())
.collect(Collectors.toList()))
.addAllDecommissionedServers(metrics.getDecommissionedServerNames().stream()
.map(ProtobufUtil::toServerName).collect(Collectors.toList()));
if (metrics.getMasterName() != null) {
builder.setMaster(ProtobufUtil.toServerName((metrics.getMasterName())));
builder.setMaster(ProtobufUtil.toServerName(metrics.getMasterName()));
}
if (metrics.getMasterTasks() != null) {
builder.addAllMasterTasks(metrics.getMasterTasks().stream()
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -44,8 +44,8 @@ public interface CoprocessorEnvironment<C extends Coprocessor> {
int getLoadSequence();

/**
* @return a Read-only Configuration; throws {@link UnsupportedOperationException} if you try to
* set a configuration.
* Returns a Read-only Configuration; throws {@link UnsupportedOperationException} if you try to
* set a configuration.
*/
Configuration getConfiguration();

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -44,10 +44,7 @@ public HBaseServerException(boolean serverOverloaded, String message) {
this.serverOverloaded = serverOverloaded;
}

/**
* @param t throwable to check for server overloaded state
* @return True if the server was considered overloaded when the exception was thrown
*/
/** Returns True if the server was considered overloaded when the exception was thrown */
public static boolean isServerOverloaded(Throwable t) {
if (t instanceof HBaseServerException) {
return ((HBaseServerException) t).isServerOverloaded();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -100,8 +100,8 @@ public long getSeqNum() {
}

/**
* @return String made of hostname and port formatted as per
* {@link Addressing#createHostAndPortStr(String, int)}
* Returns String made of hostname and port formatted as per
* {@link Addressing#createHostAndPortStr(String, int)}
*/
public String getHostnamePort() {
return Addressing.createHostAndPortStr(this.getHostname(), this.getPort());
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -208,6 +208,7 @@ public RegionLocations removeElementsWithNullLocation() {
* @param other the locations to merge with
* @return an RegionLocations object with merged locations or the same object if nothing is merged
*/
@SuppressWarnings("ReferenceEquality")
public RegionLocations mergeLocations(RegionLocations other) {
assert other != null;

Expand Down Expand Up @@ -280,6 +281,7 @@ private HRegionLocation selectRegionLocation(HRegionLocation oldLocation,
* @return an RegionLocations object with updated locations or the same object if nothing is
* updated
*/
@SuppressWarnings("ReferenceEquality")
public RegionLocations updateLocation(HRegionLocation location, boolean checkForEquals,
boolean force) {
assert location != null;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -53,8 +53,8 @@ public interface RegionMetrics {
public long getCpRequestCount();

/**
* @return the number of write requests and read requests and coprocessor service requests made to
* region
* Returns the number of write requests and read requests and coprocessor service requests made to
* region
*/
default long getRequestCount() {
return getReadRequestCount() + getWriteRequestCount() + getCpRequestCount();
Expand Down Expand Up @@ -113,8 +113,8 @@ default String getNameAsString() {
int getStoreRefCount();

/**
* @return the max reference count for any store file among all compacted stores files of this
* region
* Returns the max reference count for any store file among all compacted stores files of this
* region
*/
int getMaxCompactedStoreFileRefCount();

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -44,10 +44,6 @@
@InterfaceAudience.Private
public final class ServerMetricsBuilder {

/**
* @param sn the server name
* @return a empty metrics
*/
public static ServerMetrics of(ServerName sn) {
return newBuilder(sn).build();
}
Expand Down Expand Up @@ -300,6 +296,7 @@ public int getVersionNumber() {
return versionNumber;
}

@Override
public String getVersion() {
return version;
}
Expand Down Expand Up @@ -414,16 +411,18 @@ public String toString() {
int currentMaxCompactedStoreFileRefCount = r.getMaxCompactedStoreFileRefCount();
maxCompactedStoreFileRefCount =
Math.max(maxCompactedStoreFileRefCount, currentMaxCompactedStoreFileRefCount);
uncompressedStoreFileSizeMB += r.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE);
storeFileSizeMB += r.getStoreFileSize().get(Size.Unit.MEGABYTE);
memStoreSizeMB += r.getMemStoreSize().get(Size.Unit.MEGABYTE);
storefileIndexSizeKB += r.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE);
uncompressedStoreFileSizeMB +=
(long) r.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE);
storeFileSizeMB += (long) r.getStoreFileSize().get(Size.Unit.MEGABYTE);
memStoreSizeMB += (long) r.getMemStoreSize().get(Size.Unit.MEGABYTE);
storefileIndexSizeKB +=
(long) r.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE);
readRequestsCount += r.getReadRequestCount();
cpRequestsCount += r.getCpRequestCount();
writeRequestsCount += r.getWriteRequestCount();
filteredReadRequestsCount += r.getFilteredReadRequestCount();
rootLevelIndexSizeKB += r.getStoreFileRootLevelIndexSize().get(Size.Unit.KILOBYTE);
bloomFilterSizeMB += r.getBloomFilterSize().get(Size.Unit.MEGABYTE);
rootLevelIndexSizeKB += (long) r.getStoreFileRootLevelIndexSize().get(Size.Unit.KILOBYTE);
bloomFilterSizeMB += (long) r.getBloomFilterSize().get(Size.Unit.MEGABYTE);
compactedCellCount += r.getCompactedCellCount();
compactingCellCount += r.getCompactingCellCount();
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -50,8 +50,8 @@ interface ClientMetrics {
long getWriteRequestCount();

/**
* @return the number of write requests and read requests and coprocessor service requests made by
* the user
* Returns the number of write requests and read requests and coprocessor service requests made by
* the user
*/
default long getRequestCount() {
return getReadRequestCount() + getWriteRequestCount();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hbase;

import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hbase.util.Strings;
Expand All @@ -30,7 +31,8 @@
public final class UserMetricsBuilder {

public static UserMetrics toUserMetrics(ClusterStatusProtos.UserLoad userLoad) {
UserMetricsBuilder builder = UserMetricsBuilder.newBuilder(userLoad.getUserName().getBytes());
UserMetricsBuilder builder =
UserMetricsBuilder.newBuilder(userLoad.getUserName().getBytes(StandardCharsets.UTF_8));
userLoad.getClientMetricsList().stream()
.map(clientMetrics -> new ClientMetricsImpl(clientMetrics.getHostName(),
clientMetrics.getReadRequestsCount(), clientMetrics.getWriteRequestsCount(),
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -27,11 +27,9 @@ abstract class AbstractResponse {

public enum ResponseType {

SINGLE(0),
MULTI(1);
SINGLE,
MULTI;

ResponseType(int value) {
}
}

public abstract ResponseType type();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -135,7 +135,7 @@ private void populateStubs(Set<ServerName> addrs) throws IOException {
* Typically, you can use lambda expression to implement this interface as
*
* <pre>
* (c, s, d) -> s.xxx(c, your request here, d)
* (c, s, d) -&gt; s.xxx(c, your request here, d)
* </pre>
*/
@FunctionalInterface
Expand Down
Loading