diff --git a/hbase-assembly/src/main/assembly/components.xml b/hbase-assembly/src/main/assembly/components.xml index 2eb16e76497c..4dd85ef0170e 100644 --- a/hbase-assembly/src/main/assembly/components.xml +++ b/hbase-assembly/src/main/assembly/components.xml @@ -144,14 +144,6 @@ 0644 - - ${project.basedir}/../hbase-rsgroup/target/ - lib - - ${rsgroup.test.jar} - - 0644 - ${project.basedir}/../hbase-mapreduce/target/ lib diff --git a/hbase-assembly/src/main/assembly/hadoop-two-compat.xml b/hbase-assembly/src/main/assembly/hadoop-two-compat.xml index 91d374987a62..6a21b9208731 100644 --- a/hbase-assembly/src/main/assembly/hadoop-two-compat.xml +++ b/hbase-assembly/src/main/assembly/hadoop-two-compat.xml @@ -52,7 +52,6 @@ org.apache.hbase:hbase-protocol-shaded org.apache.hbase:hbase-replication org.apache.hbase:hbase-rest - org.apache.hbase:hbase-rsgroup org.apache.hbase:hbase-server org.apache.hbase:hbase-shell org.apache.hbase:hbase-testing-util diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java index 8866eba94fc1..188bed64d599 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java @@ -23,6 +23,7 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -987,4 +988,9 @@ public ColumnFamilyDescriptor getColumnFamily(byte[] name) { protected ModifyableTableDescriptor getDelegateeForModification() { return delegatee; } + + @Override + public Optional getRegionServerGroup() { + return delegatee.getRegionServerGroup(); + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index c553a5656c16..745ae0fe102d 100644 
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.client.replication.TableCFs; import org.apache.hadoop.hbase.client.security.SecurityCapability; import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; +import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.quotas.QuotaFilter; import org.apache.hadoop.hbase.quotas.QuotaSettings; import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotView; @@ -53,6 +54,7 @@ import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; import org.apache.hadoop.hbase.replication.SyncReplicationState; +import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; import org.apache.hadoop.hbase.security.access.GetUserPermissionsRequest; import org.apache.hadoop.hbase.security.access.Permission; import org.apache.hadoop.hbase.security.access.UserPermission; @@ -2232,4 +2234,71 @@ List hasUserPermissions(String userName, List permissions) default List hasUserPermissions(List permissions) throws IOException { return hasUserPermissions(null, permissions); } + + /** + * Gets group info for the given group name + * @param groupName the group name + * @return group info + * @throws IOException if a remote or network exception occurs + */ + RSGroupInfo getRSGroupInfo(String groupName) throws IOException; + + /** + * Move given set of servers to the specified target RegionServer group + * @param servers set of servers to move + * @param targetGroup the group to move servers to + * @throws IOException if a remote or network exception occurs + */ + void moveServers(Set
servers, String targetGroup) throws IOException; + + /** + * Creates a new RegionServer group with the given name + * @param groupName the name of the group + * @throws IOException if a remote or network exception occurs + */ + void addRSGroup(String groupName) throws IOException; + + /** + * Removes RegionServer group associated with the given name + * @param groupName the group name + * @throws IOException if a remote or network exception occurs + */ + void removeRSGroup(String groupName) throws IOException; + + /** + * Balance regions in the given RegionServer group + * @param groupName the group name + * @return boolean Whether balance ran or not + * @throws IOException if a remote or network exception occurs + */ + boolean balanceRSGroup(String groupName) throws IOException; + + /** + * Lists current set of RegionServer groups + * @throws IOException if a remote or network exception occurs + */ + List listRSGroups() throws IOException; + + /** + * Retrieve the RSGroupInfo a server is affiliated to + * @param hostPort HostPort to get RSGroupInfo for + * @throws IOException if a remote or network exception occurs + */ + RSGroupInfo getRSGroupOfServer(Address hostPort) throws IOException; + + /** + * Remove decommissioned servers from group + * 1. Sometimes we may find the server aborted due to some hardware failure and we must offline + * the server for repairing. Or we need to move some servers to join other clusters. + * So we need to remove these servers from the group. + * 2. Dead/recovering/live servers will be disallowed. + * @param servers set of servers to remove + * @throws IOException if a remote or network exception occurs + */ + void removeServers(Set
servers) throws IOException; + + RSGroupInfo getRSGroupInfoOfTable(TableName tableName) throws IOException; + + void setRSGroupForTables(Set tables, String groupName) throws IOException; + } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java index 599e5d69f6e0..dd8a09bd33fc 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java @@ -49,6 +49,7 @@ import org.apache.hadoop.hbase.client.replication.TableCFs; import org.apache.hadoop.hbase.client.security.SecurityCapability; import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; +import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.quotas.QuotaFilter; import org.apache.hadoop.hbase.quotas.QuotaSettings; import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotView; @@ -56,6 +57,7 @@ import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; import org.apache.hadoop.hbase.replication.SyncReplicationState; +import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; import org.apache.hadoop.hbase.security.access.GetUserPermissionsRequest; import org.apache.hadoop.hbase.security.access.Permission; import org.apache.hadoop.hbase.security.access.UserPermission; @@ -942,4 +944,55 @@ public List hasUserPermissions(String userName, List permis throws IOException { return get(admin.hasUserPermissions(userName, permissions)); } + + @Override + public RSGroupInfo getRSGroupInfo(String groupName) throws IOException { + return get(admin.getRSGroupInfo(groupName)); + } + + @Override + public void moveServers(Set
servers, String targetGroup) throws IOException { + get(admin.moveServers(servers, targetGroup)); + } + + @Override + public void addRSGroup(String groupName) throws IOException { + get(admin.addRSGroup(groupName)); + } + + @Override + public void removeRSGroup(String groupName) throws IOException { + get(admin.removeRSGroup(groupName)); + } + + @Override + public boolean balanceRSGroup(String groupName) throws IOException { + return get(admin.balanceRSGroup(groupName)); + } + + @Override + public List listRSGroups() throws IOException { + return get(admin.listRSGroups()); + } + + @Override + public RSGroupInfo getRSGroupOfServer(Address hostPort) throws IOException { + return get(admin.getRSGroupOfServer(hostPort)); + } + + @Override + public void removeServers(Set
servers) throws IOException { + get(admin.removeServers(servers)); + } + + @Override + public RSGroupInfo getRSGroupInfoOfTable(TableName tableName) throws IOException { + return get(admin.getRSGroupInfoOfTable(tableName)); + } + + @Override + public void setRSGroupForTables(Set tables, String groupName) throws IOException { + get(admin.setRSGroupForTables(tables, groupName)); + } + } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java index 75dc6d2a2185..d4f65d4217f3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java @@ -40,12 +40,14 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.replication.TableCFs; import org.apache.hadoop.hbase.client.security.SecurityCapability; +import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.quotas.QuotaFilter; import org.apache.hadoop.hbase.quotas.QuotaSettings; import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotView; import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; import org.apache.hadoop.hbase.replication.SyncReplicationState; +import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; import org.apache.hadoop.hbase.security.access.GetUserPermissionsRequest; import org.apache.hadoop.hbase.security.access.Permission; import org.apache.hadoop.hbase.security.access.UserPermission; @@ -1381,7 +1383,7 @@ default CompletableFuture> listDeadServers() { * @param newTableName name of the new table where the table will be created * @param preserveSplits True if the splits should be preserved */ - CompletableFuture cloneTableSchema(final TableName tableName, + CompletableFuture cloneTableSchema(final TableName tableName, final TableName newTableName, final boolean 
preserveSplits); /** @@ -1484,4 +1486,71 @@ CompletableFuture> hasUserPermissions(String userName, default CompletableFuture> hasUserPermissions(List permissions) { return hasUserPermissions(null, permissions); } + + /** + * Gets group info for the given group name + * @param groupName the group name + * @return group info + * @throws IOException if a remote or network exception occurs + */ + CompletableFuture getRSGroupInfo(String groupName) throws IOException; + + /** + * Move given set of servers to the specified target RegionServer group + * @param servers set of servers to move + * @param targetGroup the group to move servers to + * @throws IOException if a remote or network exception occurs + */ + CompletableFuture moveServers(Set
servers, String targetGroup) throws IOException; + + /** + * Creates a new RegionServer group with the given name + * @param groupName the name of the group + * @throws IOException if a remote or network exception occurs + */ + CompletableFuture addRSGroup(String groupName) throws IOException; + + /** + * Removes RegionServer group associated with the given name + * @param groupName the group name + * @throws IOException if a remote or network exception occurs + */ + CompletableFuture removeRSGroup(String groupName) throws IOException; + + /** + * Balance regions in the given RegionServer group + * @param groupName the group name + * @return boolean Whether balance ran or not + * @throws IOException if a remote or network exception occurs + */ + CompletableFuture balanceRSGroup(String groupName) throws IOException; + + /** + * Lists current set of RegionServer groups + * @throws IOException if a remote or network exception occurs + */ + CompletableFuture> listRSGroups() throws IOException; + + /** + * Retrieve the RSGroupInfo a server is affiliated to + * @param hostPort HostPort to get RSGroupInfo for + * @throws IOException if a remote or network exception occurs + */ + CompletableFuture getRSGroupOfServer(Address hostPort) throws IOException; + + /** + * Remove decommissioned servers from group + * 1. Sometimes we may find the server aborted due to some hardware failure and we must offline + * the server for repairing. Or we need to move some servers to join other clusters. + * So we need to remove these servers from the group. + * 2. Dead/recovering/live servers will be disallowed. + * @param servers set of servers to remove + * @throws IOException if a remote or network exception occurs + */ + CompletableFuture removeServers(Set
servers) throws IOException; + + CompletableFuture getRSGroupInfoOfTable(TableName tableName) throws IOException; + + CompletableFuture setRSGroupForTables(Set tables, String groupName) throws IOException; + } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java index 7787f188331d..6cd48861063a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.client; import com.google.protobuf.RpcChannel; +import java.io.IOException; import java.util.EnumSet; import java.util.List; import java.util.Map; @@ -36,12 +37,14 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.replication.TableCFs; import org.apache.hadoop.hbase.client.security.SecurityCapability; +import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.quotas.QuotaFilter; import org.apache.hadoop.hbase.quotas.QuotaSettings; import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot; import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; import org.apache.hadoop.hbase.replication.SyncReplicationState; +import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; import org.apache.hadoop.hbase.security.access.GetUserPermissionsRequest; import org.apache.hadoop.hbase.security.access.Permission; import org.apache.hadoop.hbase.security.access.UserPermission; @@ -826,4 +829,55 @@ public CompletableFuture> hasUserPermissions(String userName, List permissions) { return wrap(rawAdmin.hasUserPermissions(userName, permissions)); } + + @Override + public CompletableFuture getRSGroupInfo(String groupName) { + return wrap(rawAdmin.getRSGroupInfo(groupName)); + } + + @Override + public CompletableFuture moveServers(Set
servers, String targetGroup) { + return wrap(rawAdmin.moveServers(servers, targetGroup)); + } + + @Override + public CompletableFuture addRSGroup(String groupName) { + return wrap(rawAdmin.addRSGroup(groupName)); + } + + @Override + public CompletableFuture removeRSGroup(String groupName) { + return wrap(rawAdmin.removeRSGroup(groupName)); + } + + @Override + public CompletableFuture balanceRSGroup(String groupName) { + return wrap(rawAdmin.balanceRSGroup(groupName)); + } + + @Override + public CompletableFuture> listRSGroups() { + return wrap(rawAdmin.listRSGroups()); + } + + @Override + public CompletableFuture getRSGroupOfServer(Address hostPort) { + return wrap(rawAdmin.getRSGroupOfServer(hostPort)); + } + + @Override + public CompletableFuture removeServers(Set
servers) { + return wrap(rawAdmin.removeServers(servers)); + } + + @Override + public CompletableFuture getRSGroupInfoOfTable(TableName tableName) { + return wrap(rawAdmin.getRSGroupInfoOfTable(tableName)); + } + + @Override + public CompletableFuture setRSGroupForTables(Set tables, String groupName) { + return wrap(rawAdmin.setRSGroupForTables(tables, groupName)); + } + } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java index 47a79022cb4d..b17d2fceb896 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java @@ -78,6 +78,7 @@ import org.apache.hadoop.hbase.client.security.SecurityCapability; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.ipc.HBaseRpcController; +import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.quotas.QuotaFilter; import org.apache.hadoop.hbase.quotas.QuotaSettings; import org.apache.hadoop.hbase.quotas.QuotaTableUtil; @@ -86,6 +87,7 @@ import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; import org.apache.hadoop.hbase.replication.SyncReplicationState; +import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; import org.apache.hadoop.hbase.security.access.GetUserPermissionsRequest; import org.apache.hadoop.hbase.security.access.Permission; import org.apache.hadoop.hbase.security.access.ShadedAccessControlUtil; @@ -297,6 +299,26 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.TransitReplicationPeerSyncReplicationStateResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest; import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.MoveServersRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.MoveServersResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse; +import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.SetRSGroupForTablesRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.SetRSGroupForTablesResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos; /** @@ -3856,4 +3878,118 @@ public CompletableFuture> hasUserPermissions(String userName, resp -> resp.getHasUserPermissionList())) .call(); } + + @Override + public CompletableFuture getRSGroupInfo(String groupName) { + return this. newMasterCaller() + .action(((controller, stub) -> this. + call(controller, stub, + RequestConverter.buildGetRSGroupInfoRequest(groupName), + (s, c, req, done) -> s.getRSGroupInfo(c, req, done), + resp -> resp.hasRSGroupInfo() ? + ProtobufUtil.toGroupInfo(resp.getRSGroupInfo()) : null))) + .call(); + } + + @Override + public CompletableFuture moveServers(Set
servers, String targetGroup) { + return this. newMasterCaller() + .action((controller, stub) -> this. + call(controller, stub, + RequestConverter.buildMoveServersRequest(servers, targetGroup), + (s, c, req, done) -> s.moveServers(c, req, done), resp -> null)) + .call(); + } + + @Override + public CompletableFuture addRSGroup(String groupName) { + return this. newMasterCaller() + .action(((controller, stub) -> this. + call(controller, stub, + AddRSGroupRequest.newBuilder().setRSGroupName(groupName).build(), + (s, c, req, done) -> s.addRSGroup(c, req, done), resp -> null))) + .call(); + } + + @Override + public CompletableFuture removeRSGroup(String groupName) { + return this. newMasterCaller() + .action((controller, stub) -> this. + call(controller, stub, + RemoveRSGroupRequest.newBuilder().setRSGroupName(groupName).build(), + (s, c, req, done) -> s.removeRSGroup(c, req, done), resp -> null)) + .call(); + } + + @Override + public CompletableFuture balanceRSGroup(String groupName) { + return this. newMasterCaller() + .action((controller, stub) -> this. + call(controller, stub, + BalanceRSGroupRequest.newBuilder().setRSGroupName(groupName).build(), + (s, c, req, done) -> s.balanceRSGroup(c, req, done), + resp -> resp.getBalanceRan())) + .call(); + } + + @Override + public CompletableFuture> listRSGroups() { + return this.> newMasterCaller() + .action((controller, stub) -> this + .> call(controller, + stub, ListRSGroupInfosRequest.getDefaultInstance(), + (s, c, req, done) -> s.listRSGroupInfos(c, req, done), + resp -> resp.getRSGroupInfoList().stream() + .map(r -> ProtobufUtil.toGroupInfo(r)) + .collect(Collectors.toList()))) + .call(); + } + + @Override + public CompletableFuture getRSGroupOfServer(Address hostPort) { + return this. newMasterCaller() + .action((controller, stub) -> this. 
+ call( + controller, stub, + RequestConverter.buildGetRSGroupInfoOfServerRequest(hostPort), + (s, c, req, done) -> s.getRSGroupInfoOfServer(c, req, done), + resp -> resp.hasRSGroupInfo() ? + ProtobufUtil.toGroupInfo(resp.getRSGroupInfo()) : null)) + .call(); + } + + @Override + public CompletableFuture removeServers(Set
servers) { + return this. newMasterCaller() + .action((controller, stub) -> this. + call(controller, stub, + RequestConverter.buildRemoveServersRequest(servers), + (s, c, req, done) -> s.removeServers(c, req, done), resp -> null)) + .call(); + } + + @Override + public CompletableFuture setRSGroupForTables(Set tables, String groupName) { + return this. newMasterCaller() + .action((controller, stub) -> this. + call(controller, stub, + RequestConverter.buildSetRSGroupForTablesRequest(tables, groupName), + (s, c, req, done) -> s.setRSGroupForTables(c, req, done), resp -> null)) + .call(); + } + + @Override + public CompletableFuture getRSGroupInfoOfTable(TableName table) { + return this. newMasterCaller() + .action((controller, stub) -> this. + call( + controller, stub, + GetRSGroupInfoOfTableRequest.newBuilder().setTableName( + ProtobufUtil.toProtoTableName(table)).build(), + (s, c, req, done) -> s.getRSGroupInfoOfTable(c, req, done), + resp -> resp.hasRSGroupInfo() ? + ProtobufUtil.toGroupInfo(resp.getRSGroupInfo()) : null)) + .call(); + } + } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java index fc5e69e88c4a..a4523872c9c5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java @@ -23,6 +23,7 @@ import java.util.Comparator; import java.util.Iterator; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.stream.Stream; import org.apache.hadoop.hbase.HConstants; @@ -183,6 +184,13 @@ public interface TableDescriptor { @Deprecated String getOwnerString(); + /** + * Get the region server group this table belongs to. The regions of this table will be placed + * only on the region servers within this group. 
If not present, will be placed on + * {@link org.apache.hadoop.hbase.rsgroup.RSGroupInfo#DEFAULT_GROUP}. + */ + Optional getRegionServerGroup(); + /** * Getter for accessing the metadata associated with the key. * diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java index 037a7f860cbf..09ee0c53557c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java @@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; @@ -188,6 +189,9 @@ public class TableDescriptorBuilder { private static final Bytes PRIORITY_KEY = new Bytes(Bytes.toBytes(PRIORITY)); + private static final Bytes RSGROUP_KEY = + new Bytes(Bytes.toBytes(RSGroupInfo.TABLE_DESC_PROP_GROUP)); + /** * Relative priority of the table used for rpc scheduling */ @@ -537,6 +541,11 @@ public TableDescriptorBuilder setReplicationScope(int scope) { return this; } + public TableDescriptorBuilder setRegionServerGroup(String group) { + desc.setValue(RSGROUP_KEY, new Bytes(Bytes.toBytes(group))); + return this; + } + public TableDescriptor build() { return new ModifyableTableDescriptor(desc); } @@ -1577,6 +1586,16 @@ private static TableDescriptor parseFrom(final byte[] bytes) public int getColumnFamilyCount() { return families.size(); } + + @Override + public Optional getRegionServerGroup() { + Bytes value = values.get(RSGROUP_KEY); + if (value != null) { + return Optional.of(Bytes.toString(value.get(), value.getOffset(), value.getLength())); + } else { + 
return Optional.empty(); + } + } } private static Optional toCoprocessorDescriptor(String spec) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java index ac0695b35ddd..7989c8935874 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java @@ -1768,23 +1768,36 @@ public static ServerName toServerName(final byte [] data) throws Deserialization return ServerName.valueOf(hostname, port, -1L); } + public static HBaseProtos.TimeRange toTimeRange(TimeRange timeRange) { + if (timeRange == null) { + timeRange = TimeRange.allTime(); + } + return HBaseProtos.TimeRange.newBuilder().setFrom(timeRange.getMin()).setTo(timeRange.getMax()) + .build(); + } + public static RSGroupInfo toGroupInfo(RSGroupProtos.RSGroupInfo proto) { RSGroupInfo RSGroupInfo = new RSGroupInfo(proto.getName()); - for(HBaseProtos.ServerName el: proto.getServersList()) { + for (HBaseProtos.ServerName el : proto.getServersList()) { RSGroupInfo.addServer(Address.fromParts(el.getHostName(), el.getPort())); } - for(HBaseProtos.TableName pTableName: proto.getTablesList()) { + for (HBaseProtos.TableName pTableName : proto.getTablesList()) { RSGroupInfo.addTable(ProtobufUtil.toTableName(pTableName)); } return RSGroupInfo; } - public static HBaseProtos.TimeRange toTimeRange(TimeRange timeRange) { - if (timeRange == null) { - timeRange = TimeRange.allTime(); + public static RSGroupProtos.RSGroupInfo toProtoGroupInfo(RSGroupInfo pojo) { + List tables = new ArrayList<>(pojo.getTables().size()); + for (TableName arg : pojo.getTables()) { + tables.add(ProtobufUtil.toProtoTableName(arg)); + } + List hostports = new ArrayList<>(pojo.getServers().size()); + for (Address el : pojo.getServers()) { + hostports.add(HBaseProtos.ServerName.newBuilder().setHostName(el.getHostname()) + 
.setPort(el.getPort()).build()); } - return HBaseProtos.TimeRange.newBuilder().setFrom(timeRange.getMin()) - .setTo(timeRange.getMax()) - .build(); + return RSGroupProtos.RSGroupInfo.newBuilder().setName(pojo.getName()).addAllServers(hostports) + .addAllTables(tables).build(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index 353801f5a3ab..64543aa2996d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -91,6 +91,7 @@ import org.apache.hadoop.hbase.filter.ByteArrayComparable; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.io.TimeRange; +import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.protobuf.ProtobufMagic; import org.apache.hadoop.hbase.protobuf.ProtobufMessageConverter; import org.apache.hadoop.hbase.quotas.QuotaScope; @@ -99,6 +100,7 @@ import org.apache.hadoop.hbase.quotas.ThrottleType; import org.apache.hadoop.hbase.replication.ReplicationLoadSink; import org.apache.hadoop.hbase.replication.ReplicationLoadSource; +import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; import org.apache.hadoop.hbase.security.visibility.Authorizations; import org.apache.hadoop.hbase.security.visibility.CellVisibility; import org.apache.hadoop.hbase.util.Addressing; @@ -175,6 +177,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest; import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos; @@ -3287,4 +3290,29 @@ public static Set toCompactedStoreFiles(byte[] bytes) throws IOException } return Collections.emptySet(); } + + public static RSGroupInfo toGroupInfo(RSGroupProtos.RSGroupInfo proto) { + RSGroupInfo RSGroupInfo = new RSGroupInfo(proto.getName()); + for (HBaseProtos.ServerName el : proto.getServersList()) { + RSGroupInfo.addServer(Address.fromParts(el.getHostName(), el.getPort())); + } + for (HBaseProtos.TableName pTableName : proto.getTablesList()) { + RSGroupInfo.addTable(ProtobufUtil.toTableName(pTableName)); + } + return RSGroupInfo; + } + + public static RSGroupProtos.RSGroupInfo toProtoGroupInfo(RSGroupInfo pojo) { + List tables = new ArrayList<>(pojo.getTables().size()); + for (TableName arg : pojo.getTables()) { + tables.add(ProtobufUtil.toProtoTableName(arg)); + } + List hostports = new ArrayList<>(pojo.getServers().size()); + for (Address el : pojo.getServers()) { + hostports.add(HBaseProtos.ServerName.newBuilder().setHostName(el.getHostname()) + .setPort(el.getPort()).build()); + } + return RSGroupProtos.RSGroupInfo.newBuilder().setName(pojo.getName()).addAllServers(hostports) + .addAllTables(tables).build(); + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java index 1bad6bd3e308..e899316be713 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java @@ -56,6 +56,7 @@ import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.filter.ByteArrayComparable; import org.apache.hadoop.hbase.io.TimeRange; +import org.apache.hadoop.hbase.net.Address; 
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.replication.SyncReplicationState; import org.apache.hadoop.hbase.util.Bytes; @@ -64,6 +65,7 @@ import org.apache.hadoop.security.token.Token; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.common.collect.Sets; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest; @@ -153,6 +155,11 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.TransitReplicationPeerSyncReplicationStateRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.MoveServersRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.SetRSGroupForTablesRequest; /** * Helper utility to build protocol buffer requests, @@ -1900,4 +1907,57 @@ private static List toEncodedRegionNameRegionSpecifiers( map(r -> buildRegionSpecifier(RegionSpecifierType.ENCODED_REGION_NAME, Bytes.toBytes(r))). 
collect(Collectors.toList()); } + + public static GetRSGroupInfoRequest buildGetRSGroupInfoRequest(String groupName) { + GetRSGroupInfoRequest.Builder builder = GetRSGroupInfoRequest.newBuilder(); + if (groupName != null && !groupName.isEmpty()) { + builder.setRSGroupName(groupName); + } + return builder.build(); + } + + public static MoveServersRequest buildMoveServersRequest(Set
servers, + String targetGroup) { + Set hostPorts = Sets.newHashSet(); + for (Address el : servers) { + hostPorts.add( + HBaseProtos.ServerName.newBuilder().setHostName(el.getHostname()).setPort(el.getPort()) + .build()); + } + return MoveServersRequest.newBuilder().setTargetGroup(targetGroup).addAllServers(hostPorts) + .build(); + } + + public static GetRSGroupInfoOfServerRequest buildGetRSGroupInfoOfServerRequest(Address hostPort) { + return GetRSGroupInfoOfServerRequest.newBuilder() + .setServer(HBaseProtos.ServerName.newBuilder() + .setHostName(hostPort.getHostname()) + .setPort(hostPort.getPort()) + .build()) + .build(); + } + + public static RemoveServersRequest buildRemoveServersRequest(Set
servers) { + Set hostPorts = Sets.newHashSet(); + for(Address el: servers) { + hostPorts.add(HBaseProtos.ServerName.newBuilder() + .setHostName(el.getHostname()) + .setPort(el.getPort()) + .build()); + } + return RemoveServersRequest.newBuilder() + .addAllServers(hostPorts) + .build(); + } + + public static SetRSGroupForTablesRequest buildSetRSGroupForTablesRequest( + Set tables, String groupName){ + SetRSGroupForTablesRequest.Builder builder = + SetRSGroupForTablesRequest.newBuilder().setTargetGroup(groupName); + for(TableName tableName: tables) { + builder.addTableName(ProtobufUtil.toProtoTableName(tableName)); + } + return builder.build(); + } + } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java index 25e827de0520..817e23744e36 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java @@ -19,10 +19,8 @@ package org.apache.hadoop.hbase.rsgroup; import java.util.Collection; -import java.util.Set; import java.util.SortedSet; import java.util.TreeSet; - import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.net.Address; import org.apache.yetus.audience.InterfaceAudience; @@ -34,21 +32,38 @@ public class RSGroupInfo { public static final String DEFAULT_GROUP = "default"; public static final String NAMESPACE_DESC_PROP_GROUP = "hbase.rsgroup.name"; + public static final String TABLE_DESC_PROP_GROUP = "hbase.rsgroup.name"; private final String name; // Keep servers in a sorted set so has an expected ordering when displayed. private final SortedSet
servers; // Keep tables sorted too. + /** + * @deprecated Since 3.0.0, will be removed in 4.0.0. The rsgroup information will be stored in + * the configuration of a table so this will be removed. + */ + @Deprecated private final SortedSet tables; public RSGroupInfo(String name) { this(name, new TreeSet
(), new TreeSet()); } + RSGroupInfo(String name, SortedSet
servers) { + this.name = name; + this.servers = servers == null ? new TreeSet<>() : new TreeSet<>(servers); + this.tables = new TreeSet<>(); + } + + /** + * @deprecated Since 3.0.0, will be removed in 4.0.0. The rsgroup information for a table will be + * stored in the configuration of a table so this will be removed. + */ + @Deprecated RSGroupInfo(String name, SortedSet
servers, SortedSet tables) { this.name = name; this.servers = (servers == null) ? new TreeSet<>() : new TreeSet<>(servers); - this.tables = (tables == null) ? new TreeSet<>() : new TreeSet<>(tables); + this.tables = (tables == null) ? new TreeSet<>() : new TreeSet<>(tables); } public RSGroupInfo(RSGroupInfo src) { @@ -87,7 +102,7 @@ public boolean containsServer(Address hostPort) { /** * Get list of servers. */ - public Set
getServers() { + public SortedSet
getServers() { return servers; } @@ -100,23 +115,46 @@ public boolean removeServer(Address hostPort) { /** * Get set of tables that are members of the group. + * @deprecated Since 3.0.0, will be removed in 4.0.0. The rsgroup information will be stored in + * the configuration of a table so this will be removed. */ + @Deprecated public SortedSet getTables() { return tables; } + /** + * @deprecated Since 3.0.0, will be removed in 4.0.0. The rsgroup information will be stored in + * the configuration of a table so this will be removed. + */ + @Deprecated public void addTable(TableName table) { tables.add(table); } + /** + * @deprecated Since 3.0.0, will be removed in 4.0.0. The rsgroup information will be stored in + * the configuration of a table so this will be removed. + */ + @Deprecated public void addAllTables(Collection arg) { tables.addAll(arg); } + /** + * @deprecated Since 3.0.0, will be removed in 4.0.0. The rsgroup information will be stored in + * the configuration of a table so this will be removed. + */ + @Deprecated public boolean containsTable(TableName table) { return tables.contains(table); } + /** + * @deprecated Since 3.0.0, will be removed in 4.0.0. The rsgroup information will be stored in + * the configuration of a table so this will be removed. 
+ */ + @Deprecated public boolean removeTable(TableName table) { return tables.remove(table); } diff --git a/hbase-it/pom.xml b/hbase-it/pom.xml index 0fe578a99f9a..6b173f0ffd67 100644 --- a/hbase-it/pom.xml +++ b/hbase-it/pom.xml @@ -181,16 +181,6 @@ test-jar test - - org.apache.hbase - hbase-rsgroup - - - org.apache.hbase - hbase-rsgroup - test-jar - test - org.apache.hbase hbase-server diff --git a/hbase-protocol-shaded/src/main/protobuf/Master.proto b/hbase-protocol-shaded/src/main/protobuf/Master.proto index 3429d0343dcb..a371fbc42be3 100644 --- a/hbase-protocol-shaded/src/main/protobuf/Master.proto +++ b/hbase-protocol-shaded/src/main/protobuf/Master.proto @@ -37,6 +37,7 @@ import "Quota.proto"; import "Replication.proto"; import "Snapshot.proto"; import "AccessControl.proto"; +import "RSGroupAdmin.proto"; /* Column-level protobufs */ @@ -1053,6 +1054,37 @@ service MasterService { /** returns a list of namespace names */ rpc ListNamespaces(ListNamespacesRequest) returns(ListNamespacesResponse); + + rpc GetRSGroupInfo(GetRSGroupInfoRequest) + returns (GetRSGroupInfoResponse); + + rpc GetRSGroupInfoOfServer(GetRSGroupInfoOfServerRequest) + returns (GetRSGroupInfoOfServerResponse); + + rpc MoveServers(MoveServersRequest) + returns (MoveServersResponse); + + rpc AddRSGroup(AddRSGroupRequest) + returns (AddRSGroupResponse); + + rpc RemoveRSGroup(RemoveRSGroupRequest) + returns (RemoveRSGroupResponse); + + rpc BalanceRSGroup(BalanceRSGroupRequest) + returns (BalanceRSGroupResponse); + + rpc ListRSGroupInfos(ListRSGroupInfosRequest) + returns (ListRSGroupInfosResponse); + + rpc RemoveServers(RemoveServersRequest) + returns (RemoveServersResponse); + + rpc GetRSGroupInfoOfTable(GetRSGroupInfoOfTableRequest) + returns (GetRSGroupInfoOfTableResponse); + + rpc SetRSGroupForTables(SetRSGroupForTablesRequest) + returns (SetRSGroupForTablesResponse); + } // HBCK Service definitions. 
diff --git a/hbase-rsgroup/src/test/resources/hbase-site.xml b/hbase-protocol-shaded/src/main/protobuf/RSGroup.proto similarity index 67% rename from hbase-rsgroup/src/test/resources/hbase-site.xml rename to hbase-protocol-shaded/src/main/protobuf/RSGroup.proto index 99d2ab8d1fbb..ede2b13cb5cc 100644 --- a/hbase-rsgroup/src/test/resources/hbase-site.xml +++ b/hbase-protocol-shaded/src/main/protobuf/RSGroup.proto @@ -1,8 +1,4 @@ - - - - - - hbase.defaults.for.version.skip - true - - - hbase.hconnection.threads.keepalivetime - 3 - - + +package hbase.pb; + +option java_package = "org.apache.hadoop.hbase.shaded.protobuf.generated"; +option java_outer_classname = "RSGroupProtos"; +option java_generic_services = true; +option java_generate_equals_and_hash = true; +option optimize_for = SPEED; + +import "HBase.proto"; + +message RSGroupInfo { + required string name = 1; + repeated ServerName servers = 4; + repeated TableName tables = 3; +} diff --git a/hbase-protocol-shaded/src/main/protobuf/RSGroupAdmin.proto b/hbase-protocol-shaded/src/main/protobuf/RSGroupAdmin.proto new file mode 100644 index 000000000000..2297a22a37f0 --- /dev/null +++ b/hbase-protocol-shaded/src/main/protobuf/RSGroupAdmin.proto @@ -0,0 +1,170 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package hbase.pb; + +option java_package = "org.apache.hadoop.hbase.shaded.protobuf.generated"; +option java_outer_classname = "RSGroupAdminProtos"; +option java_generic_services = true; +option java_generate_equals_and_hash = true; +option optimize_for = SPEED; + +import "HBase.proto"; +import "RSGroup.proto"; + +/** Group level protobufs */ + +message ListTablesOfRSGroupRequest { + required string r_s_group_name = 1; +} + +message ListTablesOfRSGroupResponse { + repeated TableName table_name = 1; +} + +message GetRSGroupInfoRequest { + required string r_s_group_name = 1; +} + +message GetRSGroupInfoResponse { + optional RSGroupInfo r_s_group_info = 1; +} + +message GetRSGroupInfoOfTableRequest { + required TableName table_name = 1; +} + +message GetRSGroupInfoOfTableResponse { + optional RSGroupInfo r_s_group_info = 1; +} + +message MoveServersRequest { + required string target_group = 1; + repeated ServerName servers = 3; +} + +message MoveServersResponse { +} + +message MoveTablesRequest { + required string target_group = 1; + repeated TableName table_name = 2; +} + +message MoveTablesResponse { +} + +message AddRSGroupRequest { + required string r_s_group_name = 1; +} + +message AddRSGroupResponse { +} + +message RemoveRSGroupRequest { + required string r_s_group_name = 1; +} + +message RemoveRSGroupResponse { +} + +message BalanceRSGroupRequest { + required string r_s_group_name = 1; +} + +message BalanceRSGroupResponse { + required bool balanceRan = 1; +} + +message ListRSGroupInfosRequest { +} + +message ListRSGroupInfosResponse { + repeated RSGroupInfo r_s_group_info = 1; +} + +message GetRSGroupInfoOfServerRequest { + required ServerName server = 2; +} + +message GetRSGroupInfoOfServerResponse { + optional RSGroupInfo r_s_group_info = 1; +} + +message MoveServersAndTablesRequest { + required string target_group = 1; + repeated ServerName 
servers = 2; + repeated TableName table_name = 3; +} + +message MoveServersAndTablesResponse { +} + +message RemoveServersRequest { + repeated ServerName servers = 1; +} + +message RemoveServersResponse { +} + +message SetRSGroupForTablesRequest { + repeated TableName table_name = 1; + required string target_group = 2; +} + +message SetRSGroupForTablesResponse { +} + +service RSGroupAdminService { + rpc GetRSGroupInfo(GetRSGroupInfoRequest) + returns (GetRSGroupInfoResponse); + + rpc GetRSGroupInfoOfTable(GetRSGroupInfoOfTableRequest) + returns (GetRSGroupInfoOfTableResponse); + + rpc GetRSGroupInfoOfServer(GetRSGroupInfoOfServerRequest) + returns (GetRSGroupInfoOfServerResponse); + + rpc MoveServers(MoveServersRequest) + returns (MoveServersResponse); + + rpc MoveTables(MoveTablesRequest) + returns (MoveTablesResponse); + + rpc AddRSGroup(AddRSGroupRequest) + returns (AddRSGroupResponse); + + rpc RemoveRSGroup(RemoveRSGroupRequest) + returns (RemoveRSGroupResponse); + + rpc BalanceRSGroup(BalanceRSGroupRequest) + returns (BalanceRSGroupResponse); + + rpc ListRSGroupInfos(ListRSGroupInfosRequest) + returns (ListRSGroupInfosResponse); + + rpc MoveServersAndTables(MoveServersAndTablesRequest) + returns (MoveServersAndTablesResponse); + + rpc RemoveServers(RemoveServersRequest) + returns (RemoveServersResponse); + + rpc SetRSGroupForTables(SetRSGroupForTablesRequest) + returns (SetRSGroupForTablesResponse); + +} diff --git a/hbase-rsgroup/src/main/protobuf/RSGroupAdmin.proto b/hbase-protocol/src/main/protobuf/RSGroupAdmin.proto similarity index 93% rename from hbase-rsgroup/src/main/protobuf/RSGroupAdmin.proto rename to hbase-protocol/src/main/protobuf/RSGroupAdmin.proto index 416097b2f0fc..3fe64d15f580 100644 --- a/hbase-rsgroup/src/main/protobuf/RSGroupAdmin.proto +++ b/hbase-protocol/src/main/protobuf/RSGroupAdmin.proto @@ -122,6 +122,14 @@ message RemoveServersRequest { message RemoveServersResponse { } +message SetRSGroupForTablesRequest { + repeated TableName 
table_name = 1; + required string target_group = 2; +} + +message SetRSGroupForTablesResponse { +} + service RSGroupAdminService { rpc GetRSGroupInfo(GetRSGroupInfoRequest) returns (GetRSGroupInfoResponse); @@ -155,4 +163,7 @@ service RSGroupAdminService { rpc RemoveServers(RemoveServersRequest) returns (RemoveServersResponse); + + rpc SetRSGroupForTables(SetRSGroupForTablesRequest) + returns (SetRSGroupForTablesResponse); } diff --git a/hbase-rsgroup/README.txt b/hbase-rsgroup/README.txt deleted file mode 100644 index b24aee650603..000000000000 --- a/hbase-rsgroup/README.txt +++ /dev/null @@ -1,13 +0,0 @@ -ON PROTOBUFS -This maven module has protobuf definition files ('.protos') used by hbase -Coprocessor Endpoints that ship with hbase core including tests. Coprocessor -Endpoints are meant to be standalone, independent code not reliant on hbase -internals. They define their Service using protobuf. The protobuf version -they use can be distinct from that used by HBase internally since HBase started -shading its protobuf references. Endpoints have no access to the shaded protobuf -hbase uses. They do have access to the content of hbase-protocol -- the -.protos found in here -- but avoid using as much of this as you can as it is -liable to change. - -Generation of java files from protobuf .proto files included here is done as -part of the build. 
diff --git a/hbase-rsgroup/pom.xml b/hbase-rsgroup/pom.xml deleted file mode 100644 index b494a9afde7c..000000000000 --- a/hbase-rsgroup/pom.xml +++ /dev/null @@ -1,278 +0,0 @@ - - - - 4.0.0 - - hbase-build-configuration - org.apache.hbase - 3.0.0-SNAPSHOT - ../hbase-build-configuration - - hbase-rsgroup - Apache HBase - RSGroup - Regionserver Groups for HBase - - - - - maven-assembly-plugin - - true - - - - - org.apache.maven.plugins - maven-source-plugin - - - org.xolstice.maven.plugins - protobuf-maven-plugin - - - compile-protoc - generate-sources - - compile - - - - ${basedir}/../hbase-protocol/src/main/protobuf - - - - - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - net.revelc.code - warbucks-maven-plugin - - - - - - - org.apache.hbase - hbase-annotations - test-jar - test - - - org.apache.hbase - hbase-client - - - org.apache.hbase - hbase-server - - - org.apache.hbase - hbase-common - - - org.apache.hbase - hbase-procedure - - - org.apache.hbase - hbase-protocol - - - org.apache.hbase - hbase-protocol-shaded - - - org.apache.hbase - hbase-testing-util - test - - - - org.apache.commons - commons-lang3 - - - org.slf4j - slf4j-api - - - org.apache.hbase.thirdparty - hbase-shaded-miscellaneous - - - com.google.protobuf - protobuf-java - - - org.apache.zookeeper - zookeeper - - - log4j - log4j - test - - - org.mockito - mockito-core - test - - - junit - junit - test - - - - - - skipRSGroupTests - - - skipRSGroupTests - - - - true - true - - - - - hadoop-2.0 - - - - - !hadoop.profile - - - - - com.github.stephenc.findbugs - findbugs-annotations - true - - - org.apache.hadoop - hadoop-common - - - net.java.dev.jets3t - jets3t - - - javax.servlet.jsp - jsp-api - - - org.mortbay.jetty - jetty - - - com.sun.jersey - jersey-server - - - com.sun.jersey - jersey-core - - - com.sun.jersey - jersey-json - - - javax.servlet - servlet-api - - - tomcat - jasper-compiler - - - tomcat - jasper-runtime - - - com.google.code.findbugs - jsr305 - - - - - 
- - - hadoop-3.0 - - - hadoop.profile - 3.0 - - - - 3.0-SNAPSHOT - - - - org.apache.hadoop - hadoop-common - - - - - eclipse-specific - - - m2e.version - - - - - - - - org.eclipse.m2e - lifecycle-mapping - - - - - - - - - - - - - diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java deleted file mode 100644 index e8a141064381..000000000000 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java +++ /dev/null @@ -1,239 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.rsgroup; - -import com.google.protobuf.ServiceException; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Set; - -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.Connection; -import org.apache.hadoop.hbase.net.Address; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; -import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest; -import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest; -import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest; -import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse; -import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest; -import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse; -import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest; -import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse; -import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest; -import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest; -import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest; -import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest; -import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RSGroupAdminService; -import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest; -import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest; -import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos; -import org.apache.yetus.audience.InterfaceAudience; - -import org.apache.hbase.thirdparty.com.google.common.collect.Sets; - -/** - * Client used for managing region server group information. - */ -@InterfaceAudience.Private -public class RSGroupAdminClient implements RSGroupAdmin { - private RSGroupAdminService.BlockingInterface stub; - private Admin admin; - - public RSGroupAdminClient(Connection conn) throws IOException { - admin = conn.getAdmin(); - stub = RSGroupAdminService.newBlockingStub(admin.coprocessorService()); - } - - @Override - public RSGroupInfo getRSGroupInfo(String groupName) throws IOException { - try { - GetRSGroupInfoResponse resp = stub.getRSGroupInfo(null, - GetRSGroupInfoRequest.newBuilder().setRSGroupName(groupName).build()); - if(resp.hasRSGroupInfo()) { - return RSGroupProtobufUtil.toGroupInfo(resp.getRSGroupInfo()); - } - return null; - } catch (ServiceException e) { - throw ProtobufUtil.handleRemoteException(e); - } - } - - @Override - public RSGroupInfo getRSGroupInfoOfTable(TableName tableName) throws IOException { - GetRSGroupInfoOfTableRequest request = GetRSGroupInfoOfTableRequest.newBuilder().setTableName( - ProtobufUtil.toProtoTableName(tableName)).build(); - try { - GetRSGroupInfoOfTableResponse resp = stub.getRSGroupInfoOfTable(null, request); - if (resp.hasRSGroupInfo()) { - return RSGroupProtobufUtil.toGroupInfo(resp.getRSGroupInfo()); - } - return null; - } catch (ServiceException e) { - throw ProtobufUtil.handleRemoteException(e); - } - } - - @Override - public void moveServers(Set
servers, String targetGroup) throws IOException { - Set hostPorts = Sets.newHashSet(); - for(Address el: servers) { - hostPorts.add(HBaseProtos.ServerName.newBuilder() - .setHostName(el.getHostname()) - .setPort(el.getPort()) - .build()); - } - MoveServersRequest request = MoveServersRequest.newBuilder() - .setTargetGroup(targetGroup) - .addAllServers(hostPorts) - .build(); - try { - stub.moveServers(null, request); - } catch (ServiceException e) { - throw ProtobufUtil.handleRemoteException(e); - } - } - - @Override - public void moveTables(Set tables, String targetGroup) throws IOException { - MoveTablesRequest.Builder builder = MoveTablesRequest.newBuilder().setTargetGroup(targetGroup); - for(TableName tableName: tables) { - builder.addTableName(ProtobufUtil.toProtoTableName(tableName)); - if (!admin.tableExists(tableName)) { - throw new TableNotFoundException(tableName); - } - } - try { - stub.moveTables(null, builder.build()); - } catch (ServiceException e) { - throw ProtobufUtil.handleRemoteException(e); - } - } - - @Override - public void addRSGroup(String groupName) throws IOException { - AddRSGroupRequest request = AddRSGroupRequest.newBuilder().setRSGroupName(groupName).build(); - try { - stub.addRSGroup(null, request); - } catch (ServiceException e) { - throw ProtobufUtil.handleRemoteException(e); - } - } - - @Override - public void removeRSGroup(String name) throws IOException { - RemoveRSGroupRequest request = RemoveRSGroupRequest.newBuilder().setRSGroupName(name).build(); - try { - stub.removeRSGroup(null, request); - } catch (ServiceException e) { - throw ProtobufUtil.handleRemoteException(e); - } - } - - @Override - public boolean balanceRSGroup(String groupName) throws IOException { - BalanceRSGroupRequest request = BalanceRSGroupRequest.newBuilder() - .setRSGroupName(groupName).build(); - try { - return stub.balanceRSGroup(null, request).getBalanceRan(); - } catch (ServiceException e) { - throw ProtobufUtil.handleRemoteException(e); - } - } - - 
@Override - public List listRSGroups() throws IOException { - try { - List resp = stub.listRSGroupInfos(null, - ListRSGroupInfosRequest.getDefaultInstance()).getRSGroupInfoList(); - List result = new ArrayList<>(resp.size()); - for(RSGroupProtos.RSGroupInfo entry : resp) { - result.add(RSGroupProtobufUtil.toGroupInfo(entry)); - } - return result; - } catch (ServiceException e) { - throw ProtobufUtil.handleRemoteException(e); - } - } - - @Override - public RSGroupInfo getRSGroupOfServer(Address hostPort) throws IOException { - GetRSGroupInfoOfServerRequest request = GetRSGroupInfoOfServerRequest.newBuilder() - .setServer(HBaseProtos.ServerName.newBuilder() - .setHostName(hostPort.getHostname()) - .setPort(hostPort.getPort()) - .build()) - .build(); - try { - GetRSGroupInfoOfServerResponse resp = stub.getRSGroupInfoOfServer(null, request); - if (resp.hasRSGroupInfo()) { - return RSGroupProtobufUtil.toGroupInfo(resp.getRSGroupInfo()); - } - return null; - } catch (ServiceException e) { - throw ProtobufUtil.handleRemoteException(e); - } - } - - @Override - public void moveServersAndTables(Set
servers, Set tables, String targetGroup) - throws IOException { - MoveServersAndTablesRequest.Builder builder = - MoveServersAndTablesRequest.newBuilder().setTargetGroup(targetGroup); - for(Address el: servers) { - builder.addServers(HBaseProtos.ServerName.newBuilder() - .setHostName(el.getHostname()) - .setPort(el.getPort()) - .build()); - } - for(TableName tableName: tables) { - builder.addTableName(ProtobufUtil.toProtoTableName(tableName)); - if (!admin.tableExists(tableName)) { - throw new TableNotFoundException(tableName); - } - } - try { - stub.moveServersAndTables(null, builder.build()); - } catch (ServiceException e) { - throw ProtobufUtil.handleRemoteException(e); - } - } - - @Override - public void removeServers(Set
servers) throws IOException { - Set hostPorts = Sets.newHashSet(); - for(Address el: servers) { - hostPorts.add(HBaseProtos.ServerName.newBuilder() - .setHostName(el.getHostname()) - .setPort(el.getPort()) - .build()); - } - RemoveServersRequest request = RemoveServersRequest.newBuilder() - .addAllServers(hostPorts) - .build(); - try { - stub.removeServers(null, request); - } catch (ServiceException e) { - throw ProtobufUtil.handleRemoteException(e); - } - } -} diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java deleted file mode 100644 index 090ac6e907e8..000000000000 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java +++ /dev/null @@ -1,561 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hbase.rsgroup; - -import com.google.protobuf.RpcCallback; -import com.google.protobuf.RpcController; -import com.google.protobuf.Service; - -import java.io.IOException; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Optional; -import java.util.Set; -import java.util.stream.Collectors; - -import org.apache.hadoop.hbase.CoprocessorEnvironment; -import org.apache.hadoop.hbase.HBaseIOException; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.MasterNotRunningException; -import org.apache.hadoop.hbase.NamespaceDescriptor; -import org.apache.hadoop.hbase.PleaseHoldException; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.RegionInfo; -import org.apache.hadoop.hbase.client.SnapshotDescription; -import org.apache.hadoop.hbase.client.TableDescriptor; -import org.apache.hadoop.hbase.constraint.ConstraintException; -import org.apache.hadoop.hbase.coprocessor.CoreCoprocessor; -import org.apache.hadoop.hbase.coprocessor.HasMasterServices; -import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor; -import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; -import org.apache.hadoop.hbase.coprocessor.MasterObserver; -import org.apache.hadoop.hbase.coprocessor.ObserverContext; -import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; -import org.apache.hadoop.hbase.ipc.RpcServer; -import org.apache.hadoop.hbase.master.MasterServices; -import org.apache.hadoop.hbase.net.Address; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; -import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos; -import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest; -import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse; -import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest; -import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse; -import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest; -import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse; -import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest; -import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse; -import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest; -import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse; -import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest; -import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse; -import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest; -import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse; -import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest; -import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse; -import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest; -import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse; -import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RSGroupAdminService; -import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest; -import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse; -import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest; -import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse; -import 
org.apache.hadoop.hbase.security.User; -import org.apache.hadoop.hbase.security.UserProvider; -import org.apache.hadoop.hbase.security.access.AccessChecker; -import org.apache.hadoop.hbase.security.access.Permission.Action; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hbase.thirdparty.com.google.common.collect.Sets; - -// TODO: Encapsulate MasterObserver functions into separate subclass. -@CoreCoprocessor -@InterfaceAudience.Private -public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver { - private static final Logger LOG = LoggerFactory.getLogger(RSGroupAdminEndpoint.class); - - private MasterServices master = null; - // Only instance of RSGroupInfoManager. RSGroup aware load balancers ask for this instance on - // their setup. - private RSGroupInfoManager groupInfoManager; - private RSGroupAdminServer groupAdminServer; - private final RSGroupAdminService groupAdminService = new RSGroupAdminServiceImpl(); - private AccessChecker accessChecker; - - /** Provider for mapping principal names to Users */ - private UserProvider userProvider; - - @Override - public void start(CoprocessorEnvironment env) throws IOException { - if (!(env instanceof HasMasterServices)) { - throw new IOException("Does not implement HMasterServices"); - } - - master = ((HasMasterServices)env).getMasterServices(); - groupInfoManager = RSGroupInfoManagerImpl.getInstance(master); - groupAdminServer = new RSGroupAdminServer(master, groupInfoManager); - Class clazz = - master.getConfiguration().getClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, null); - if (!RSGroupableBalancer.class.isAssignableFrom(clazz)) { - throw new IOException("Configured balancer does not support RegionServer groups."); - } - accessChecker = ((HasMasterServices) env).getMasterServices().getAccessChecker(); - - // set the user-provider. 
- this.userProvider = UserProvider.instantiate(env.getConfiguration()); - } - - @Override - public void stop(CoprocessorEnvironment env) { - } - - @Override - public Iterable getServices() { - return Collections.singleton(groupAdminService); - } - - @Override - public Optional getMasterObserver() { - return Optional.of(this); - } - - RSGroupInfoManager getGroupInfoManager() { - return groupInfoManager; - } - - /** - * Implementation of RSGroupAdminService defined in RSGroupAdmin.proto. - * This class calls {@link RSGroupAdminServer} for actual work, converts result to protocol - * buffer response, handles exceptions if any occurred and then calls the {@code RpcCallback} with - * the response. - */ - private class RSGroupAdminServiceImpl extends RSGroupAdminProtos.RSGroupAdminService { - @Override - public void getRSGroupInfo(RpcController controller, - GetRSGroupInfoRequest request, RpcCallback done) { - GetRSGroupInfoResponse.Builder builder = GetRSGroupInfoResponse.newBuilder(); - String groupName = request.getRSGroupName(); - LOG.info(master.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, group=" - + groupName); - try { - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().preGetRSGroupInfo(groupName); - } - checkPermission("getRSGroupInfo"); - RSGroupInfo rsGroupInfo = groupAdminServer.getRSGroupInfo(groupName); - if (rsGroupInfo != null) { - builder.setRSGroupInfo(RSGroupProtobufUtil.toProtoGroupInfo(rsGroupInfo)); - } - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().postGetRSGroupInfo(groupName); - } - } catch (IOException e) { - CoprocessorRpcUtils.setControllerException(controller, e); - } - done.run(builder.build()); - } - - @Override - public void getRSGroupInfoOfTable(RpcController controller, - GetRSGroupInfoOfTableRequest request, RpcCallback done) { - GetRSGroupInfoOfTableResponse.Builder builder = GetRSGroupInfoOfTableResponse.newBuilder(); - TableName tableName = 
ProtobufUtil.toTableName(request.getTableName()); - LOG.info(master.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, table=" - + tableName); - try { - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().preGetRSGroupInfoOfTable(tableName); - } - checkPermission("getRSGroupInfoOfTable"); - RSGroupInfo RSGroupInfo = groupAdminServer.getRSGroupInfoOfTable(tableName); - if (RSGroupInfo != null) { - builder.setRSGroupInfo(RSGroupProtobufUtil.toProtoGroupInfo(RSGroupInfo)); - } - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().postGetRSGroupInfoOfTable(tableName); - } - } catch (IOException e) { - CoprocessorRpcUtils.setControllerException(controller, e); - } - done.run(builder.build()); - } - - @Override - public void moveServers(RpcController controller, MoveServersRequest request, - RpcCallback done) { - MoveServersResponse.Builder builder = MoveServersResponse.newBuilder(); - Set
hostPorts = Sets.newHashSet(); - for (HBaseProtos.ServerName el : request.getServersList()) { - hostPorts.add(Address.fromParts(el.getHostName(), el.getPort())); - } - LOG.info(master.getClientIdAuditPrefix() + " move servers " + hostPorts +" to rsgroup " - + request.getTargetGroup()); - try { - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().preMoveServers(hostPorts, request.getTargetGroup()); - } - checkPermission("moveServers"); - groupAdminServer.moveServers(hostPorts, request.getTargetGroup()); - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().postMoveServers(hostPorts, request.getTargetGroup()); - } - } catch (IOException e) { - CoprocessorRpcUtils.setControllerException(controller, e); - } - done.run(builder.build()); - } - - @Override - public void moveTables(RpcController controller, MoveTablesRequest request, - RpcCallback done) { - MoveTablesResponse.Builder builder = MoveTablesResponse.newBuilder(); - Set tables = new HashSet<>(request.getTableNameList().size()); - for (HBaseProtos.TableName tableName : request.getTableNameList()) { - tables.add(ProtobufUtil.toTableName(tableName)); - } - LOG.info(master.getClientIdAuditPrefix() + " move tables " + tables +" to rsgroup " - + request.getTargetGroup()); - try { - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().preMoveTables(tables, request.getTargetGroup()); - } - checkPermission("moveTables"); - groupAdminServer.moveTables(tables, request.getTargetGroup()); - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().postMoveTables(tables, request.getTargetGroup()); - } - } catch (IOException e) { - CoprocessorRpcUtils.setControllerException(controller, e); - } - done.run(builder.build()); - } - - @Override - public void addRSGroup(RpcController controller, AddRSGroupRequest request, - RpcCallback done) { - AddRSGroupResponse.Builder builder = 
AddRSGroupResponse.newBuilder(); - LOG.info(master.getClientIdAuditPrefix() + " add rsgroup " + request.getRSGroupName()); - try { - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().preAddRSGroup(request.getRSGroupName()); - } - checkPermission("addRSGroup"); - groupAdminServer.addRSGroup(request.getRSGroupName()); - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().postAddRSGroup(request.getRSGroupName()); - } - } catch (IOException e) { - CoprocessorRpcUtils.setControllerException(controller, e); - } - done.run(builder.build()); - } - - @Override - public void removeRSGroup(RpcController controller, - RemoveRSGroupRequest request, RpcCallback done) { - RemoveRSGroupResponse.Builder builder = - RemoveRSGroupResponse.newBuilder(); - LOG.info(master.getClientIdAuditPrefix() + " remove rsgroup " + request.getRSGroupName()); - try { - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().preRemoveRSGroup(request.getRSGroupName()); - } - checkPermission("removeRSGroup"); - groupAdminServer.removeRSGroup(request.getRSGroupName()); - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().postRemoveRSGroup(request.getRSGroupName()); - } - } catch (IOException e) { - CoprocessorRpcUtils.setControllerException(controller, e); - } - done.run(builder.build()); - } - - @Override - public void balanceRSGroup(RpcController controller, - BalanceRSGroupRequest request, RpcCallback done) { - BalanceRSGroupResponse.Builder builder = BalanceRSGroupResponse.newBuilder(); - LOG.info(master.getClientIdAuditPrefix() + " balance rsgroup, group=" + - request.getRSGroupName()); - try { - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().preBalanceRSGroup(request.getRSGroupName()); - } - checkPermission("balanceRSGroup"); - boolean balancerRan = groupAdminServer.balanceRSGroup(request.getRSGroupName()); - 
builder.setBalanceRan(balancerRan); - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().postBalanceRSGroup(request.getRSGroupName(), - balancerRan); - } - } catch (IOException e) { - CoprocessorRpcUtils.setControllerException(controller, e); - builder.setBalanceRan(false); - } - done.run(builder.build()); - } - - @Override - public void listRSGroupInfos(RpcController controller, - ListRSGroupInfosRequest request, RpcCallback done) { - ListRSGroupInfosResponse.Builder builder = ListRSGroupInfosResponse.newBuilder(); - LOG.info(master.getClientIdAuditPrefix() + " list rsgroup"); - try { - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().preListRSGroups(); - } - checkPermission("listRSGroup"); - for (RSGroupInfo RSGroupInfo : groupAdminServer.listRSGroups()) { - builder.addRSGroupInfo(RSGroupProtobufUtil.toProtoGroupInfo(RSGroupInfo)); - } - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().postListRSGroups(); - } - } catch (IOException e) { - CoprocessorRpcUtils.setControllerException(controller, e); - } - done.run(builder.build()); - } - - @Override - public void getRSGroupInfoOfServer(RpcController controller, - GetRSGroupInfoOfServerRequest request, RpcCallback done) { - GetRSGroupInfoOfServerResponse.Builder builder = GetRSGroupInfoOfServerResponse.newBuilder(); - Address hp = Address.fromParts(request.getServer().getHostName(), - request.getServer().getPort()); - LOG.info(master.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, server=" - + hp); - try { - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().preGetRSGroupInfoOfServer(hp); - } - checkPermission("getRSGroupInfoOfServer"); - RSGroupInfo info = groupAdminServer.getRSGroupOfServer(hp); - if (info != null) { - builder.setRSGroupInfo(RSGroupProtobufUtil.toProtoGroupInfo(info)); - } - if (master.getMasterCoprocessorHost() != null) { - 
master.getMasterCoprocessorHost().postGetRSGroupInfoOfServer(hp); - } - } catch (IOException e) { - CoprocessorRpcUtils.setControllerException(controller, e); - } - done.run(builder.build()); - } - - @Override - public void moveServersAndTables(RpcController controller, - MoveServersAndTablesRequest request, RpcCallback done) { - MoveServersAndTablesResponse.Builder builder = MoveServersAndTablesResponse.newBuilder(); - Set
hostPorts = Sets.newHashSet(); - for (HBaseProtos.ServerName el : request.getServersList()) { - hostPorts.add(Address.fromParts(el.getHostName(), el.getPort())); - } - Set tables = new HashSet<>(request.getTableNameList().size()); - for (HBaseProtos.TableName tableName : request.getTableNameList()) { - tables.add(ProtobufUtil.toTableName(tableName)); - } - LOG.info(master.getClientIdAuditPrefix() + " move servers " + hostPorts - + " and tables " + tables + " to rsgroup" + request.getTargetGroup()); - try { - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().preMoveServersAndTables(hostPorts, tables, - request.getTargetGroup()); - } - checkPermission("moveServersAndTables"); - groupAdminServer.moveServersAndTables(hostPorts, tables, request.getTargetGroup()); - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().postMoveServersAndTables(hostPorts, tables, - request.getTargetGroup()); - } - } catch (IOException e) { - CoprocessorRpcUtils.setControllerException(controller, e); - } - done.run(builder.build()); - } - - @Override - public void removeServers(RpcController controller, - RemoveServersRequest request, - RpcCallback done) { - RemoveServersResponse.Builder builder = - RemoveServersResponse.newBuilder(); - Set
servers = Sets.newHashSet(); - for (HBaseProtos.ServerName el : request.getServersList()) { - servers.add(Address.fromParts(el.getHostName(), el.getPort())); - } - LOG.info(master.getClientIdAuditPrefix() - + " remove decommissioned servers from rsgroup: " + servers); - try { - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().preRemoveServers(servers); - } - checkPermission("removeServers"); - groupAdminServer.removeServers(servers); - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().postRemoveServers(servers); - } - } catch (IOException e) { - CoprocessorRpcUtils.setControllerException(controller, e); - } - done.run(builder.build()); - } - } - - boolean rsgroupHasServersOnline(TableDescriptor desc) throws IOException { - String groupName; - try { - groupName = - master.getClusterSchema().getNamespace(desc.getTableName().getNamespaceAsString()) - .getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP); - if (groupName == null) { - groupName = RSGroupInfo.DEFAULT_GROUP; - } - } catch (MasterNotRunningException | PleaseHoldException e) { - LOG.info("Master has not initialized yet; temporarily using default RSGroup '" + - RSGroupInfo.DEFAULT_GROUP + "' for deploy of system table"); - groupName = RSGroupInfo.DEFAULT_GROUP; - } - - RSGroupInfo rsGroupInfo = groupAdminServer.getRSGroupInfo(groupName); - if (rsGroupInfo == null) { - throw new ConstraintException( - "Default RSGroup (" + groupName + ") for this table's " + "namespace does not exist."); - } - - for (ServerName onlineServer : master.getServerManager().createDestinationServersList()) { - if (rsGroupInfo.getServers().contains(onlineServer.getAddress())) { - return true; - } - } - return false; - } - - void assignTableToGroup(TableDescriptor desc) throws IOException { - String groupName = - master.getClusterSchema().getNamespace(desc.getTableName().getNamespaceAsString()) - 
.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP); - if (groupName == null) { - groupName = RSGroupInfo.DEFAULT_GROUP; - } - RSGroupInfo rsGroupInfo = groupAdminServer.getRSGroupInfo(groupName); - if (rsGroupInfo == null) { - throw new ConstraintException("Default RSGroup (" + groupName + ") for this table's " - + "namespace does not exist."); - } - if (!rsGroupInfo.containsTable(desc.getTableName())) { - LOG.debug("Pre-moving table " + desc.getTableName() + " to RSGroup " + groupName); - groupAdminServer.moveTables(Sets.newHashSet(desc.getTableName()), groupName); - } - } - - ///////////////////////////////////////////////////////////////////////////// - // MasterObserver overrides - ///////////////////////////////////////////////////////////////////////////// - - @Override - public void preCreateTableAction( - final ObserverContext ctx, - final TableDescriptor desc, - final RegionInfo[] regions) throws IOException { - if (!desc.getTableName().isSystemTable() && !rsgroupHasServersOnline(desc)) { - throw new HBaseIOException("No online servers in the rsgroup, which table " + - desc.getTableName().getNameAsString() + " belongs to"); - } - } - - // Assign table to default RSGroup. - @Override - public void postCreateTable(ObserverContext ctx, - TableDescriptor desc, RegionInfo[] regions) throws IOException { - assignTableToGroup(desc); - } - - // Remove table from its RSGroup. 
- @Override - public void postDeleteTable(ObserverContext ctx, - TableName tableName) throws IOException { - try { - RSGroupInfo group = groupAdminServer.getRSGroupInfoOfTable(tableName); - if (group != null) { - LOG.debug(String.format("Removing deleted table '%s' from rsgroup '%s'", tableName, - group.getName())); - groupAdminServer.moveTables(Sets.newHashSet(tableName), null); - } - } catch (IOException ex) { - LOG.debug("Failed to perform RSGroup information cleanup for table: " + tableName, ex); - } - } - - @Override - public void preCreateNamespace(ObserverContext ctx, - NamespaceDescriptor ns) throws IOException { - String group = ns.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP); - if(group != null && groupAdminServer.getRSGroupInfo(group) == null) { - throw new ConstraintException("Region server group "+group+" does not exit"); - } - } - - @Override - public void preModifyNamespace(ObserverContext ctx, - NamespaceDescriptor currentNsDesc, NamespaceDescriptor newNsDesc) throws IOException { - preCreateNamespace(ctx, newNsDesc); - } - - @Override - public void preCloneSnapshot(ObserverContext ctx, - SnapshotDescription snapshot, TableDescriptor desc) throws IOException { - assignTableToGroup(desc); - } - - @Override - public void postClearDeadServers(ObserverContext ctx, - List servers, List notClearedServers) - throws IOException { - Set
clearedServer = servers.stream(). - filter(server -> !notClearedServers.contains(server)). - map(ServerName::getAddress). - collect(Collectors.toSet()); - if(!clearedServer.isEmpty()) { - groupAdminServer.removeServers(clearedServer); - } - } - - public void checkPermission(String request) throws IOException { - accessChecker.requirePermission(getActiveUser(), request, null, Action.ADMIN); - } - - /** - * Returns the active user to which authorization checks should be applied. - * If we are in the context of an RPC call, the remote user is used, - * otherwise the currently logged in user is used. - */ - private User getActiveUser() throws IOException { - // for non-rpc handling, fallback to system user - Optional optionalUser = RpcServer.getRequestUser(); - if (optionalUser.isPresent()) { - return optionalUser.get(); - } - return userProvider.getCurrent(); - } -} diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java deleted file mode 100644 index f3ef4fb96d2a..000000000000 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java +++ /dev/null @@ -1,642 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.rsgroup; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.function.Function; - -import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.hbase.DoNotRetryIOException; -import org.apache.hadoop.hbase.NamespaceDescriptor; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.RegionInfo; -import org.apache.hadoop.hbase.constraint.ConstraintException; -import org.apache.hadoop.hbase.master.HMaster; -import org.apache.hadoop.hbase.master.LoadBalancer; -import org.apache.hadoop.hbase.master.MasterServices; -import org.apache.hadoop.hbase.master.RegionPlan; -import org.apache.hadoop.hbase.master.RegionState; -import org.apache.hadoop.hbase.master.ServerManager; -import org.apache.hadoop.hbase.master.assignment.AssignmentManager; -import org.apache.hadoop.hbase.master.assignment.RegionStateNode; -import org.apache.hadoop.hbase.net.Address; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hbase.thirdparty.com.google.common.collect.Maps; - -/** - * Service to support Region Server Grouping (HBase-6721). 
- */ -@InterfaceAudience.Private -public class RSGroupAdminServer implements RSGroupAdmin { - private static final Logger LOG = LoggerFactory.getLogger(RSGroupAdminServer.class); - static final String KEEP_ONE_SERVER_IN_DEFAULT_ERROR_MESSAGE = "should keep at least " + - "one server in 'default' RSGroup."; - - private MasterServices master; - private final RSGroupInfoManager rsGroupInfoManager; - - /** Define the config key of retries threshold when movements failed */ - //made package private for testing - static final String FAILED_MOVE_MAX_RETRY = "hbase.rsgroup.move.max.retry"; - - /** Define the default number of retries */ - //made package private for testing - static final int DEFAULT_MAX_RETRY_VALUE = 50; - - private int moveMaxRetry; - - public RSGroupAdminServer(MasterServices master, RSGroupInfoManager rsGroupInfoManager) { - this.master = master; - this.rsGroupInfoManager = rsGroupInfoManager; - this.moveMaxRetry = master.getConfiguration().getInt(FAILED_MOVE_MAX_RETRY, - DEFAULT_MAX_RETRY_VALUE); - } - - @Override - public RSGroupInfo getRSGroupInfo(String groupName) throws IOException { - return rsGroupInfoManager.getRSGroup(groupName); - } - - @Override - public RSGroupInfo getRSGroupInfoOfTable(TableName tableName) throws IOException { - // We are reading across two Maps in the below with out synchronizing across - // them; should be safe most of the time. - String groupName = rsGroupInfoManager.getRSGroupOfTable(tableName); - return groupName == null? null: rsGroupInfoManager.getRSGroup(groupName); - } - - private void checkOnlineServersOnly(Set
servers) throws ConstraintException { - // This uglyness is because we only have Address, not ServerName. - // Online servers are keyed by ServerName. - Set
onlineServers = new HashSet<>(); - for(ServerName server: master.getServerManager().getOnlineServers().keySet()) { - onlineServers.add(server.getAddress()); - } - for (Address address: servers) { - if (!onlineServers.contains(address)) { - throw new ConstraintException( - "Server " + address + " is not an online server in 'default' RSGroup."); - } - } - } - - /** - * Check passed name. Fail if nulls or if corresponding RSGroupInfo not found. - * @return The RSGroupInfo named name - */ - private RSGroupInfo getAndCheckRSGroupInfo(String name) throws IOException { - if (StringUtils.isEmpty(name)) { - throw new ConstraintException("RSGroup cannot be null."); - } - RSGroupInfo rsGroupInfo = getRSGroupInfo(name); - if (rsGroupInfo == null) { - throw new ConstraintException("RSGroup does not exist: " + name); - } - return rsGroupInfo; - } - - /** - * @return List of Regions associated with this server. - */ - private List getRegions(final Address server) { - LinkedList regions = new LinkedList<>(); - for (Map.Entry el : - master.getAssignmentManager().getRegionStates().getRegionAssignments().entrySet()) { - if (el.getValue() == null) { - continue; - } - - if (el.getValue().getAddress().equals(server)) { - addRegion(regions, el.getKey()); - } - } - for (RegionStateNode state : master.getAssignmentManager().getRegionsInTransition()) { - if (state.getRegionLocation() != null && - state.getRegionLocation().getAddress().equals(server)) { - addRegion(regions, state.getRegionInfo()); - } - } - return regions; - } - - private void addRegion(final LinkedList regions, RegionInfo hri) { - // If meta, move it last otherwise other unassigns fail because meta is not - // online for them to update state in. This is dodgy. Needs to be made more - // robust. See TODO below. - if (hri.isMetaRegion()) { - regions.addLast(hri); - } else { - regions.addFirst(hri); - } - } - - /** - * Check servers and tables. 
- * - * @param servers servers to move - * @param tables tables to move - * @param targetGroupName target group name - * @throws IOException if nulls or if servers and tables not belong to the same group - */ - private void checkServersAndTables(Set
servers, Set tables, - String targetGroupName) throws IOException { - // Presume first server's source group. Later ensure all servers are from this group. - Address firstServer = servers.iterator().next(); - RSGroupInfo tmpSrcGrp = rsGroupInfoManager.getRSGroupOfServer(firstServer); - if (tmpSrcGrp == null) { - // Be careful. This exception message is tested for in TestRSGroupsBase... - throw new ConstraintException("Source RSGroup for server " + firstServer - + " does not exist."); - } - RSGroupInfo srcGrp = new RSGroupInfo(tmpSrcGrp); - - // Only move online servers - checkOnlineServersOnly(servers); - - // Ensure all servers are of same rsgroup. - for (Address server: servers) { - String tmpGroup = rsGroupInfoManager.getRSGroupOfServer(server).getName(); - if (!tmpGroup.equals(srcGrp.getName())) { - throw new ConstraintException("Move server request should only come from one source " + - "RSGroup. Expecting only " + srcGrp.getName() + " but contains " + tmpGroup); - } - } - - // Ensure all tables and servers are of same rsgroup. - for (TableName table : tables) { - String tmpGroup = rsGroupInfoManager.getRSGroupOfTable(table); - if (!tmpGroup.equals(srcGrp.getName())) { - throw new ConstraintException("Move table request should only come from one source " + - "RSGroup. Expecting only " + srcGrp.getName() + " but contains " + tmpGroup); - } - } - - if (srcGrp.getServers().size() <= servers.size() && srcGrp.getTables().size() > tables.size()) { - throw new ConstraintException("Cannot leave a RSGroup " + srcGrp.getName() + - " that contains tables without servers to host them."); - } - } - - /** - * Move every region from servers which are currently located on these servers, - * but should not be located there. - * - * @param servers the servers that will move to new group - * @param targetGroupName the target group name - * @throws IOException if moving the server and tables fail - */ - private void moveServerRegionsFromGroup(Set
servers, String targetGroupName) - throws IOException { - moveRegionsBetweenGroups(servers, targetGroupName, - rs -> getRegions(rs), - info -> { - try { - RSGroupInfo group = getRSGroupInfo(targetGroupName); - return group.containsTable(info.getTable()); - } catch (IOException e) { - e.printStackTrace(); - return false; - } - }, - rs -> rs.getHostname()); - } - - /** - * Moves regions of tables which are not on target group servers. - * - * @param tables the tables that will move to new group - * @param targetGroupName the target group name - * @throws IOException if moving the region fails - */ - private void moveTableRegionsToGroup(Set tables, String targetGroupName) - throws IOException { - moveRegionsBetweenGroups(tables, targetGroupName, - table -> { - if (master.getAssignmentManager().isTableDisabled(table)) { - return new ArrayList<>(); - } - return master.getAssignmentManager().getRegionStates().getRegionsOfTable(table); - }, - info -> { - try { - RSGroupInfo group = getRSGroupInfo(targetGroupName); - ServerName sn = - master.getAssignmentManager().getRegionStates().getRegionServerOfRegion(info); - return group.containsServer(sn.getAddress()); - } catch (IOException e) { - e.printStackTrace(); - return false; - } - }, - table -> table.getNameWithNamespaceInclAsString()); - } - - private void moveRegionsBetweenGroups(Set regionsOwners, String targetGroupName, - Function> getRegionsInfo, Function validation, - Function getOwnerName) throws IOException { - boolean hasRegionsToMove; - int retry = 0; - Set allOwners = new HashSet<>(regionsOwners); - Set failedRegions = new HashSet<>(); - IOException toThrow = null; - do { - hasRegionsToMove = false; - for (Iterator iter = allOwners.iterator(); iter.hasNext(); ) { - T owner = iter.next(); - // Get regions that are associated with this server and filter regions by group tables. 
- for (RegionInfo region : getRegionsInfo.apply(owner)) { - if (!validation.apply(region)) { - LOG.info("Moving region {}, which do not belong to RSGroup {}", - region.getShortNameToLog(), targetGroupName); - try { - this.master.getAssignmentManager().move(region); - failedRegions.remove(region.getRegionNameAsString()); - } catch (IOException ioe) { - LOG.debug("Move region {} from group failed, will retry, current retry time is {}", - region.getShortNameToLog(), retry, ioe); - toThrow = ioe; - failedRegions.add(region.getRegionNameAsString()); - } - if (master.getAssignmentManager().getRegionStates(). - getRegionState(region).isFailedOpen()) { - continue; - } - hasRegionsToMove = true; - } - } - - if (!hasRegionsToMove) { - LOG.info("No more regions to move from {} to RSGroup", getOwnerName.apply(owner)); - iter.remove(); - } - } - - retry++; - try { - rsGroupInfoManager.wait(1000); - } catch (InterruptedException e) { - LOG.warn("Sleep interrupted", e); - Thread.currentThread().interrupt(); - } - } while (hasRegionsToMove && retry <= moveMaxRetry); - - //has up to max retry time or there are no more regions to move - if (hasRegionsToMove) { - // print failed moved regions, for later process conveniently - String msg = String - .format("move regions for group %s failed, failed regions: %s", targetGroupName, - failedRegions); - LOG.error(msg); - throw new DoNotRetryIOException( - msg + ", just record the last failed region's cause, more details in server log", - toThrow); - } - } - - @edu.umd.cs.findbugs.annotations.SuppressWarnings( - value="RCN_REDUNDANT_NULLCHECK_WOULD_HAVE_BEEN_A_NPE", - justification="Ignoring complaint because don't know what it is complaining about") - @Override - public void moveServers(Set
servers, String targetGroupName) throws IOException { - if (servers == null) { - throw new ConstraintException("The list of servers to move cannot be null."); - } - if (servers.isEmpty()) { - // For some reason this difference between null servers and isEmpty is important distinction. - // TODO. Why? Stuff breaks if I equate them. - return; - } - //check target group - getAndCheckRSGroupInfo(targetGroupName); - - // Hold a lock on the manager instance while moving servers to prevent - // another writer changing our state while we are working. - synchronized (rsGroupInfoManager) { - // Presume first server's source group. Later ensure all servers are from this group. - Address firstServer = servers.iterator().next(); - RSGroupInfo srcGrp = rsGroupInfoManager.getRSGroupOfServer(firstServer); - if (srcGrp == null) { - // Be careful. This exception message is tested for in TestRSGroupsBase... - throw new ConstraintException("Source RSGroup for server " + firstServer - + " does not exist."); - } - // Only move online servers (when moving from 'default') or servers from other - // groups. This prevents bogus servers from entering groups - if (RSGroupInfo.DEFAULT_GROUP.equals(srcGrp.getName())) { - if (srcGrp.getServers().size() <= servers.size()) { - throw new ConstraintException(KEEP_ONE_SERVER_IN_DEFAULT_ERROR_MESSAGE); - } - checkOnlineServersOnly(servers); - } - // Ensure all servers are of same rsgroup. - for (Address server: servers) { - String tmpGroup = rsGroupInfoManager.getRSGroupOfServer(server).getName(); - if (!tmpGroup.equals(srcGrp.getName())) { - throw new ConstraintException("Move server request should only come from one source " + - "RSGroup. 
Expecting only " + srcGrp.getName() + " but contains " + tmpGroup); - } - } - if (srcGrp.getServers().size() <= servers.size() && srcGrp.getTables().size() > 0) { - throw new ConstraintException("Cannot leave a RSGroup " + srcGrp.getName() + - " that contains tables without servers to host them."); - } - - // MovedServers may be < passed in 'servers'. - Set
movedServers = rsGroupInfoManager.moveServers(servers, srcGrp.getName(), - targetGroupName); - moveServerRegionsFromGroup(movedServers, targetGroupName); - LOG.info("Move servers done: {} => {}", srcGrp.getName(), targetGroupName); - } - } - - @Override - public void moveTables(Set tables, String targetGroup) throws IOException { - if (tables == null) { - throw new ConstraintException("The list of servers cannot be null."); - } - if (tables.size() < 1) { - LOG.debug("moveTables() passed an empty set. Ignoring."); - return; - } - - // Hold a lock on the manager instance while moving servers to prevent - // another writer changing our state while we are working. - synchronized (rsGroupInfoManager) { - if(targetGroup != null) { - RSGroupInfo destGroup = rsGroupInfoManager.getRSGroup(targetGroup); - if(destGroup == null) { - throw new ConstraintException("Target " + targetGroup + " RSGroup does not exist."); - } - if(destGroup.getServers().size() < 1) { - throw new ConstraintException("Target RSGroup must have at least one server."); - } - } - rsGroupInfoManager.moveTables(tables, targetGroup); - - // targetGroup is null when a table is being deleted. In this case no further - // action is required. - if (targetGroup != null) { - moveTableRegionsToGroup(tables, targetGroup); - } - } - } - - @Override - public void addRSGroup(String name) throws IOException { - rsGroupInfoManager.addRSGroup(new RSGroupInfo(name)); - } - - @Override - public void removeRSGroup(String name) throws IOException { - // Hold a lock on the manager instance while moving servers to prevent - // another writer changing our state while we are working. 
- synchronized (rsGroupInfoManager) { - RSGroupInfo rsGroupInfo = rsGroupInfoManager.getRSGroup(name); - if (rsGroupInfo == null) { - throw new ConstraintException("RSGroup " + name + " does not exist"); - } - int tableCount = rsGroupInfo.getTables().size(); - if (tableCount > 0) { - throw new ConstraintException("RSGroup " + name + " has " + tableCount + - " tables; you must remove these tables from the rsgroup before " + - "the rsgroup can be removed."); - } - int serverCount = rsGroupInfo.getServers().size(); - if (serverCount > 0) { - throw new ConstraintException("RSGroup " + name + " has " + serverCount + - " servers; you must remove these servers from the RSGroup before" + - "the RSGroup can be removed."); - } - for (NamespaceDescriptor ns : master.getClusterSchema().getNamespaces()) { - String nsGroup = ns.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP); - if (nsGroup != null && nsGroup.equals(name)) { - throw new ConstraintException( - "RSGroup " + name + " is referenced by namespace: " + ns.getName()); - } - } - rsGroupInfoManager.removeRSGroup(name); - } - } - - @Override - public boolean balanceRSGroup(String groupName) throws IOException { - ServerManager serverManager = master.getServerManager(); - LoadBalancer balancer = master.getLoadBalancer(); - - synchronized (balancer) { - // If balance not true, don't run balancer. - if (!((HMaster) master).isBalancerOn()) { - return false; - } - - if (getRSGroupInfo(groupName) == null) { - throw new ConstraintException("RSGroup does not exist: "+groupName); - } - // Only allow one balance run at at time. 
- Map groupRIT = rsGroupGetRegionsInTransition(groupName); - if (groupRIT.size() > 0) { - LOG.debug("Not running balancer because {} region(s) in transition: {}", groupRIT.size(), - StringUtils.abbreviate( - master.getAssignmentManager().getRegionStates().getRegionsInTransition().toString(), - 256)); - return false; - } - if (serverManager.areDeadServersInProgress()) { - LOG.debug("Not running balancer because processing dead regionserver(s): {}", - serverManager.getDeadServers()); - return false; - } - - //We balance per group instead of per table - List plans = new ArrayList<>(); - for(Map.Entry>> tableMap: - getRSGroupAssignmentsByTable(groupName).entrySet()) { - LOG.info("Creating partial plan for table {} : {}", tableMap.getKey(), tableMap.getValue()); - List partialPlans = balancer.balanceCluster(tableMap.getValue()); - LOG.info("Partial plan for table {} : {}", tableMap.getKey(), partialPlans); - if (partialPlans != null) { - plans.addAll(partialPlans); - } - } - boolean balancerRan = !plans.isEmpty(); - if (balancerRan) { - LOG.info("RSGroup balance {} starting with plan count: {}", groupName, plans.size()); - master.executeRegionPlansWithThrottling(plans); - LOG.info("RSGroup balance " + groupName + " completed"); - } - return balancerRan; - } - } - - @Override - public List listRSGroups() throws IOException { - return rsGroupInfoManager.listRSGroups(); - } - - @Override - public RSGroupInfo getRSGroupOfServer(Address hostPort) throws IOException { - return rsGroupInfoManager.getRSGroupOfServer(hostPort); - } - - @Override - public void moveServersAndTables(Set
servers, Set tables, String targetGroup) - throws IOException { - if (servers == null || servers.isEmpty()) { - throw new ConstraintException("The list of servers to move cannot be null or empty."); - } - if (tables == null || tables.isEmpty()) { - throw new ConstraintException("The list of tables to move cannot be null or empty."); - } - - //check target group - getAndCheckRSGroupInfo(targetGroup); - - // Hold a lock on the manager instance while moving servers and tables to prevent - // another writer changing our state while we are working. - synchronized (rsGroupInfoManager) { - //check servers and tables status - checkServersAndTables(servers, tables, targetGroup); - - //Move servers and tables to a new group. - String srcGroup = getRSGroupOfServer(servers.iterator().next()).getName(); - rsGroupInfoManager.moveServersAndTables(servers, tables, srcGroup, targetGroup); - - //move regions on these servers which do not belong to group tables - moveServerRegionsFromGroup(servers, targetGroup); - //move regions of these tables which are not on group servers - moveTableRegionsToGroup(tables, targetGroup); - } - LOG.info("Move servers and tables done. Severs: {}, Tables: {} => {}", servers, tables, - targetGroup); - } - - @Override - public void removeServers(Set
servers) throws IOException { - { - if (servers == null || servers.isEmpty()) { - throw new ConstraintException("The set of servers to remove cannot be null or empty."); - } - // Hold a lock on the manager instance while moving servers to prevent - // another writer changing our state while we are working. - synchronized (rsGroupInfoManager) { - //check the set of servers - checkForDeadOrOnlineServers(servers); - rsGroupInfoManager.removeServers(servers); - LOG.info("Remove decommissioned servers {} from RSGroup done", servers); - } - } - } - - private Map rsGroupGetRegionsInTransition(String groupName) - throws IOException { - Map rit = Maps.newTreeMap(); - AssignmentManager am = master.getAssignmentManager(); - for(TableName tableName : getRSGroupInfo(groupName).getTables()) { - for(RegionInfo regionInfo: am.getRegionStates().getRegionsOfTable(tableName)) { - RegionState state = am.getRegionStates().getRegionTransitionState(regionInfo); - if(state != null) { - rit.put(regionInfo.getEncodedName(), state); - } - } - } - return rit; - } - - private Map>> - getRSGroupAssignmentsByTable(String groupName) throws IOException { - Map>> result = Maps.newHashMap(); - RSGroupInfo rsGroupInfo = getRSGroupInfo(groupName); - Map>> assignments = Maps.newHashMap(); - for(Map.Entry entry: - master.getAssignmentManager().getRegionStates().getRegionAssignments().entrySet()) { - TableName currTable = entry.getKey().getTable(); - ServerName currServer = entry.getValue(); - RegionInfo currRegion = entry.getKey(); - if (rsGroupInfo.getTables().contains(currTable)) { - assignments.putIfAbsent(currTable, new HashMap<>()); - assignments.get(currTable).putIfAbsent(currServer, new ArrayList<>()); - assignments.get(currTable).get(currServer).add(currRegion); - } - } - - Map> serverMap = Maps.newHashMap(); - for(ServerName serverName: master.getServerManager().getOnlineServers().keySet()) { - if(rsGroupInfo.getServers().contains(serverName.getAddress())) { - serverMap.put(serverName, 
Collections.emptyList()); - } - } - - // add all tables that are members of the group - for(TableName tableName : rsGroupInfo.getTables()) { - if(assignments.containsKey(tableName)) { - result.put(tableName, new HashMap<>()); - result.get(tableName).putAll(serverMap); - result.get(tableName).putAll(assignments.get(tableName)); - LOG.debug("Adding assignments for {}: {}", tableName, assignments.get(tableName)); - } - } - - return result; - } - - /** - * Check if the set of servers are belong to dead servers list or online servers list. - * @param servers servers to remove - */ - private void checkForDeadOrOnlineServers(Set
servers) throws ConstraintException { - // This uglyness is because we only have Address, not ServerName. - Set
onlineServers = new HashSet<>(); - List drainingServers = master.getServerManager().getDrainingServersList(); - for (ServerName server : master.getServerManager().getOnlineServers().keySet()) { - // Only online but not decommissioned servers are really online - if (!drainingServers.contains(server)) { - onlineServers.add(server.getAddress()); - } - } - - Set
deadServers = new HashSet<>(); - for(ServerName server: master.getServerManager().getDeadServers().copyServerNames()) { - deadServers.add(server.getAddress()); - } - - for (Address address: servers) { - if (onlineServers.contains(address)) { - throw new ConstraintException( - "Server " + address + " is an online server, not allowed to remove."); - } - if (deadServers.contains(address)) { - throw new ConstraintException( - "Server " + address + " is on the dead servers list," - + " Maybe it will come back again, not allowed to remove."); - } - } - } -} diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java deleted file mode 100644 index b2d168a4ff29..000000000000 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java +++ /dev/null @@ -1,801 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.rsgroup; - -import com.google.protobuf.ServiceException; -import java.io.ByteArrayInputStream; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.NavigableSet; -import java.util.OptionalLong; -import java.util.Set; -import java.util.SortedSet; -import java.util.TreeSet; -import org.apache.hadoop.hbase.Coprocessor; -import org.apache.hadoop.hbase.DoNotRetryIOException; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; -import org.apache.hadoop.hbase.client.Connection; -import org.apache.hadoop.hbase.client.CoprocessorDescriptorBuilder; -import org.apache.hadoop.hbase.client.Delete; -import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.Mutation; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.ResultScanner; -import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.client.Table; -import org.apache.hadoop.hbase.client.TableDescriptor; -import org.apache.hadoop.hbase.client.TableDescriptorBuilder; -import org.apache.hadoop.hbase.constraint.ConstraintException; -import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint; -import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; -import org.apache.hadoop.hbase.master.MasterServices; -import org.apache.hadoop.hbase.master.ServerListener; -import org.apache.hadoop.hbase.master.TableStateManager; -import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure; -import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil; -import org.apache.hadoop.hbase.net.Address; -import 
org.apache.hadoop.hbase.procedure2.Procedure; -import org.apache.hadoop.hbase.protobuf.ProtobufMagic; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos; -import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos; -import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.Threads; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZKWatcher; -import org.apache.hadoop.hbase.zookeeper.ZNodePaths; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.zookeeper.KeeperException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hbase.thirdparty.com.google.common.collect.Lists; -import org.apache.hbase.thirdparty.com.google.common.collect.Maps; -import org.apache.hbase.thirdparty.com.google.common.collect.Sets; - -/** - * This is an implementation of {@link RSGroupInfoManager} which makes use of an HBase table as the - * persistence store for the group information. It also makes use of zookeeper to store group - * information needed for bootstrapping during offline mode. - *

Concurrency

RSGroup state is kept locally in Maps. There is a rsgroup name to cached - * RSGroupInfo Map at {@link #rsGroupMap} and a Map of tables to the name of the rsgroup they belong - * too (in {@link #tableMap}). These Maps are persisted to the hbase:rsgroup table (and cached in - * zk) on each modification. - *

- * Mutations on state are synchronized but reads can continue without having to wait on an instance - * monitor, mutations do wholesale replace of the Maps on update -- Copy-On-Write; the local Maps of - * state are read-only, just-in-case (see flushConfig). - *

- * Reads must not block else there is a danger we'll deadlock. - *

- * Clients of this class, the {@link RSGroupAdminEndpoint} for example, want to query and then act - * on the results of the query modifying cache in zookeeper without another thread making - * intermediate modifications. These clients synchronize on the 'this' instance so no other has - * access concurrently. Reads must be able to continue concurrently. - */ -@InterfaceAudience.Private -final class RSGroupInfoManagerImpl implements RSGroupInfoManager { - private static final Logger LOG = LoggerFactory.getLogger(RSGroupInfoManagerImpl.class); - - /** Table descriptor for hbase:rsgroup catalog table */ - private static final TableDescriptor RSGROUP_TABLE_DESC; - static { - TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(RSGROUP_TABLE_NAME) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(META_FAMILY_BYTES)) - .setRegionSplitPolicyClassName(DisabledRegionSplitPolicy.class.getName()); - try { - builder.setCoprocessor( - CoprocessorDescriptorBuilder.newBuilder(MultiRowMutationEndpoint.class.getName()) - .setPriority(Coprocessor.PRIORITY_SYSTEM).build()); - } catch (IOException ex) { - throw new Error(ex); - } - RSGROUP_TABLE_DESC = builder.build(); - } - - // There two Maps are immutable and wholesale replaced on each modification - // so are safe to access concurrently. See class comment. 
- private volatile Map rsGroupMap = Collections.emptyMap(); - private volatile Map tableMap = Collections.emptyMap(); - - private final MasterServices masterServices; - private final Connection conn; - private final ZKWatcher watcher; - private final RSGroupStartupWorker rsGroupStartupWorker; - // contains list of groups that were last flushed to persistent store - private Set prevRSGroups = new HashSet<>(); - private final ServerEventsListenerThread serverEventsListenerThread = - new ServerEventsListenerThread(); - - private RSGroupInfoManagerImpl(MasterServices masterServices) throws IOException { - this.masterServices = masterServices; - this.watcher = masterServices.getZooKeeper(); - this.conn = masterServices.getConnection(); - this.rsGroupStartupWorker = new RSGroupStartupWorker(); - } - - - private synchronized void init() throws IOException { - refresh(); - serverEventsListenerThread.start(); - masterServices.getServerManager().registerListener(serverEventsListenerThread); - } - - static RSGroupInfoManager getInstance(MasterServices master) throws IOException { - RSGroupInfoManagerImpl instance = new RSGroupInfoManagerImpl(master); - instance.init(); - return instance; - } - - public void start() { - // create system table of rsgroup - rsGroupStartupWorker.start(); - } - - @Override - public synchronized void addRSGroup(RSGroupInfo rsGroupInfo) throws IOException { - checkGroupName(rsGroupInfo.getName()); - if (rsGroupMap.get(rsGroupInfo.getName()) != null || - rsGroupInfo.getName().equals(RSGroupInfo.DEFAULT_GROUP)) { - throw new DoNotRetryIOException("Group already exists: " + rsGroupInfo.getName()); - } - Map newGroupMap = Maps.newHashMap(rsGroupMap); - newGroupMap.put(rsGroupInfo.getName(), rsGroupInfo); - flushConfig(newGroupMap); - } - - private RSGroupInfo getRSGroupInfo(final String groupName) throws DoNotRetryIOException { - RSGroupInfo rsGroupInfo = getRSGroup(groupName); - if (rsGroupInfo == null) { - throw new DoNotRetryIOException("RSGroup " + 
groupName + " does not exist"); - } - return rsGroupInfo; - } - - /** - * @param master the master to get online servers for - * @return Set of online Servers named for their hostname and port (not ServerName). - */ - private static Set

getOnlineServers(final MasterServices master) { - Set
onlineServers = new HashSet
(); - if (master == null) { - return onlineServers; - } - - for (ServerName server : master.getServerManager().getOnlineServers().keySet()) { - onlineServers.add(server.getAddress()); - } - return onlineServers; - } - - @Override - public synchronized Set
moveServers(Set
servers, String srcGroup, - String dstGroup) throws IOException { - RSGroupInfo src = getRSGroupInfo(srcGroup); - RSGroupInfo dst = getRSGroupInfo(dstGroup); - // If destination is 'default' rsgroup, only add servers that are online. If not online, drop - // it. If not 'default' group, add server to 'dst' rsgroup EVEN IF IT IS NOT online (could be a - // rsgroup of dead servers that are to come back later). - Set
onlineServers = - dst.getName().equals(RSGroupInfo.DEFAULT_GROUP) ? getOnlineServers(this.masterServices) - : null; - for (Address el : servers) { - src.removeServer(el); - if (onlineServers != null) { - if (!onlineServers.contains(el)) { - if (LOG.isDebugEnabled()) { - LOG.debug("Dropping " + el + " during move-to-default rsgroup because not online"); - } - continue; - } - } - dst.addServer(el); - } - Map newGroupMap = Maps.newHashMap(rsGroupMap); - newGroupMap.put(src.getName(), src); - newGroupMap.put(dst.getName(), dst); - flushConfig(newGroupMap); - return dst.getServers(); - } - - @Override - public RSGroupInfo getRSGroupOfServer(Address serverHostPort) throws IOException { - for (RSGroupInfo info : rsGroupMap.values()) { - if (info.containsServer(serverHostPort)) { - return info; - } - } - return null; - } - - @Override - public RSGroupInfo getRSGroup(String groupName) { - return rsGroupMap.get(groupName); - } - - @Override - public String getRSGroupOfTable(TableName tableName) { - return tableMap.get(tableName); - } - - @Override - public synchronized void moveTables(Set tableNames, String groupName) - throws IOException { - // Check if rsGroupMap contains the destination rsgroup - if (groupName != null && !rsGroupMap.containsKey(groupName)) { - throw new DoNotRetryIOException("Group " + groupName + " does not exist"); - } - - // Make a copy of rsGroupMap to update - Map newGroupMap = Maps.newHashMap(rsGroupMap); - - // Remove tables from their original rsgroups - // and update the copy of rsGroupMap - for (TableName tableName : tableNames) { - if (tableMap.containsKey(tableName)) { - RSGroupInfo src = new RSGroupInfo(newGroupMap.get(tableMap.get(tableName))); - src.removeTable(tableName); - newGroupMap.put(src.getName(), src); - } - } - - // Add tables to the destination rsgroup - // and update the copy of rsGroupMap - if (groupName != null) { - RSGroupInfo dstGroup = new RSGroupInfo(newGroupMap.get(groupName)); - dstGroup.addAllTables(tableNames); - 
newGroupMap.put(dstGroup.getName(), dstGroup); - } - - // Flush according to the updated copy of rsGroupMap - flushConfig(newGroupMap); - } - - @Override - public synchronized void removeRSGroup(String groupName) throws IOException { - if (!rsGroupMap.containsKey(groupName) || groupName.equals(RSGroupInfo.DEFAULT_GROUP)) { - throw new DoNotRetryIOException( - "Group " + groupName + " does not exist or is a reserved " + "group"); - } - Map newGroupMap = Maps.newHashMap(rsGroupMap); - newGroupMap.remove(groupName); - flushConfig(newGroupMap); - } - - @Override - public List listRSGroups() { - return Lists.newLinkedList(rsGroupMap.values()); - } - - @Override - public boolean isOnline() { - return rsGroupStartupWorker.isOnline(); - } - - @Override - public void moveServersAndTables(Set
servers, Set tables, String srcGroup, - String dstGroup) throws IOException { - // get server's group - RSGroupInfo srcGroupInfo = getRSGroupInfo(srcGroup); - RSGroupInfo dstGroupInfo = getRSGroupInfo(dstGroup); - - // move servers - for (Address el : servers) { - srcGroupInfo.removeServer(el); - dstGroupInfo.addServer(el); - } - // move tables - for (TableName tableName : tables) { - srcGroupInfo.removeTable(tableName); - dstGroupInfo.addTable(tableName); - } - - // flush changed groupinfo - Map newGroupMap = Maps.newHashMap(rsGroupMap); - newGroupMap.put(srcGroupInfo.getName(), srcGroupInfo); - newGroupMap.put(dstGroupInfo.getName(), dstGroupInfo); - flushConfig(newGroupMap); - } - - @Override - public synchronized void removeServers(Set
servers) throws IOException { - Map rsGroupInfos = new HashMap(); - for (Address el : servers) { - RSGroupInfo rsGroupInfo = getRSGroupOfServer(el); - if (rsGroupInfo != null) { - RSGroupInfo newRsGroupInfo = rsGroupInfos.get(rsGroupInfo.getName()); - if (newRsGroupInfo == null) { - rsGroupInfo.removeServer(el); - rsGroupInfos.put(rsGroupInfo.getName(), rsGroupInfo); - } else { - newRsGroupInfo.removeServer(el); - rsGroupInfos.put(newRsGroupInfo.getName(), newRsGroupInfo); - } - } else { - LOG.warn("Server " + el + " does not belong to any rsgroup."); - } - } - - if (rsGroupInfos.size() > 0) { - Map newGroupMap = Maps.newHashMap(rsGroupMap); - newGroupMap.putAll(rsGroupInfos); - flushConfig(newGroupMap); - } - } - - List retrieveGroupListFromGroupTable() throws IOException { - List rsGroupInfoList = Lists.newArrayList(); - try (Table table = conn.getTable(RSGROUP_TABLE_NAME); - ResultScanner scanner = table.getScanner(new Scan())) { - for (Result result;;) { - result = scanner.next(); - if (result == null) { - break; - } - RSGroupProtos.RSGroupInfo proto = RSGroupProtos.RSGroupInfo - .parseFrom(result.getValue(META_FAMILY_BYTES, META_QUALIFIER_BYTES)); - rsGroupInfoList.add(RSGroupProtobufUtil.toGroupInfo(proto)); - } - } - return rsGroupInfoList; - } - - List retrieveGroupListFromZookeeper() throws IOException { - String groupBasePath = ZNodePaths.joinZNode(watcher.getZNodePaths().baseZNode, rsGroupZNode); - List RSGroupInfoList = Lists.newArrayList(); - // Overwrite any info stored by table, this takes precedence - try { - if (ZKUtil.checkExists(watcher, groupBasePath) != -1) { - List children = ZKUtil.listChildrenAndWatchForNewChildren(watcher, groupBasePath); - if (children == null) { - return RSGroupInfoList; - } - for (String znode : children) { - byte[] data = ZKUtil.getData(watcher, ZNodePaths.joinZNode(groupBasePath, znode)); - if (data.length > 0) { - ProtobufUtil.expectPBMagicPrefix(data); - ByteArrayInputStream bis = - new ByteArrayInputStream(data, 
ProtobufUtil.lengthOfPBMagic(), data.length); - RSGroupInfoList - .add(RSGroupProtobufUtil.toGroupInfo(RSGroupProtos.RSGroupInfo.parseFrom(bis))); - } - } - LOG.debug("Read ZK GroupInfo count:" + RSGroupInfoList.size()); - } - } catch (KeeperException | DeserializationException | InterruptedException e) { - throw new IOException("Failed to read rsGroupZNode", e); - } - return RSGroupInfoList; - } - - @Override - public void refresh() throws IOException { - refresh(false); - } - - /** - * Read rsgroup info from the source of truth, the hbase:rsgroup table. Update zk cache. Called on - * startup of the manager. - */ - private synchronized void refresh(boolean forceOnline) throws IOException { - List groupList = new LinkedList<>(); - - // Overwrite anything read from zk, group table is source of truth - // if online read from GROUP table - if (forceOnline || isOnline()) { - LOG.debug("Refreshing in Online mode."); - groupList.addAll(retrieveGroupListFromGroupTable()); - } else { - LOG.debug("Refreshing in Offline mode."); - groupList.addAll(retrieveGroupListFromZookeeper()); - } - - // refresh default group, prune - NavigableSet orphanTables = new TreeSet<>(); - for (String entry : masterServices.getTableDescriptors().getAll().keySet()) { - orphanTables.add(TableName.valueOf(entry)); - } - for (RSGroupInfo group : groupList) { - if (!group.getName().equals(RSGroupInfo.DEFAULT_GROUP)) { - orphanTables.removeAll(group.getTables()); - } - } - - // This is added to the last of the list so it overwrites the 'default' rsgroup loaded - // from region group table or zk - groupList.add(new RSGroupInfo(RSGroupInfo.DEFAULT_GROUP, getDefaultServers(), orphanTables)); - - // populate the data - HashMap newGroupMap = Maps.newHashMap(); - HashMap newTableMap = Maps.newHashMap(); - for (RSGroupInfo group : groupList) { - newGroupMap.put(group.getName(), group); - for (TableName table : group.getTables()) { - newTableMap.put(table, group.getName()); - } - } - 
resetRSGroupAndTableMaps(newGroupMap, newTableMap); - updateCacheOfRSGroups(rsGroupMap.keySet()); - } - - private synchronized Map flushConfigTable(Map groupMap) - throws IOException { - Map newTableMap = Maps.newHashMap(); - List mutations = Lists.newArrayList(); - - // populate deletes - for (String groupName : prevRSGroups) { - if (!groupMap.containsKey(groupName)) { - Delete d = new Delete(Bytes.toBytes(groupName)); - mutations.add(d); - } - } - - // populate puts - for (RSGroupInfo RSGroupInfo : groupMap.values()) { - RSGroupProtos.RSGroupInfo proto = RSGroupProtobufUtil.toProtoGroupInfo(RSGroupInfo); - Put p = new Put(Bytes.toBytes(RSGroupInfo.getName())); - p.addColumn(META_FAMILY_BYTES, META_QUALIFIER_BYTES, proto.toByteArray()); - mutations.add(p); - for (TableName entry : RSGroupInfo.getTables()) { - newTableMap.put(entry, RSGroupInfo.getName()); - } - } - - if (mutations.size() > 0) { - multiMutate(mutations); - } - return newTableMap; - } - - private synchronized void flushConfig() throws IOException { - flushConfig(this.rsGroupMap); - } - - private synchronized void flushConfig(Map newGroupMap) throws IOException { - Map newTableMap; - - // For offline mode persistence is still unavailable - // We're refreshing in-memory state but only for servers in default group - if (!isOnline()) { - if (newGroupMap == this.rsGroupMap) { - // When newGroupMap is this.rsGroupMap itself, - // do not need to check default group and other groups as followed - return; - } - - Map oldGroupMap = Maps.newHashMap(rsGroupMap); - RSGroupInfo oldDefaultGroup = oldGroupMap.remove(RSGroupInfo.DEFAULT_GROUP); - RSGroupInfo newDefaultGroup = newGroupMap.remove(RSGroupInfo.DEFAULT_GROUP); - if (!oldGroupMap.equals(newGroupMap) /* compare both tables and servers in other groups */ || - !oldDefaultGroup.getTables().equals(newDefaultGroup.getTables()) - /* compare tables in default group */) { - throw new IOException("Only servers in default group can be updated during offline mode"); 
- } - - // Restore newGroupMap by putting its default group back - newGroupMap.put(RSGroupInfo.DEFAULT_GROUP, newDefaultGroup); - - // Refresh rsGroupMap - // according to the inputted newGroupMap (an updated copy of rsGroupMap) - rsGroupMap = newGroupMap; - - // Do not need to update tableMap - // because only the update on servers in default group is allowed above, - // or IOException will be thrown - return; - } - - /* For online mode, persist to Zookeeper */ - newTableMap = flushConfigTable(newGroupMap); - - // Make changes visible after having been persisted to the source of truth - resetRSGroupAndTableMaps(newGroupMap, newTableMap); - - try { - String groupBasePath = ZNodePaths.joinZNode(watcher.getZNodePaths().baseZNode, rsGroupZNode); - ZKUtil.createAndFailSilent(watcher, groupBasePath, ProtobufMagic.PB_MAGIC); - - List zkOps = new ArrayList<>(newGroupMap.size()); - for (String groupName : prevRSGroups) { - if (!newGroupMap.containsKey(groupName)) { - String znode = ZNodePaths.joinZNode(groupBasePath, groupName); - zkOps.add(ZKUtil.ZKUtilOp.deleteNodeFailSilent(znode)); - } - } - - for (RSGroupInfo RSGroupInfo : newGroupMap.values()) { - String znode = ZNodePaths.joinZNode(groupBasePath, RSGroupInfo.getName()); - RSGroupProtos.RSGroupInfo proto = RSGroupProtobufUtil.toProtoGroupInfo(RSGroupInfo); - LOG.debug("Updating znode: " + znode); - ZKUtil.createAndFailSilent(watcher, znode); - zkOps.add(ZKUtil.ZKUtilOp.deleteNodeFailSilent(znode)); - zkOps.add(ZKUtil.ZKUtilOp.createAndFailSilent(znode, - ProtobufUtil.prependPBMagic(proto.toByteArray()))); - } - LOG.debug("Writing ZK GroupInfo count: " + zkOps.size()); - - ZKUtil.multiOrSequential(watcher, zkOps, false); - } catch (KeeperException e) { - LOG.error("Failed to write to rsGroupZNode", e); - masterServices.abort("Failed to write to rsGroupZNode", e); - throw new IOException("Failed to write to rsGroupZNode", e); - } - updateCacheOfRSGroups(newGroupMap.keySet()); - } - - /** - * Make changes visible. 
Caller must be synchronized on 'this'. - */ - private void resetRSGroupAndTableMaps(Map newRSGroupMap, - Map newTableMap) { - // Make maps Immutable. - this.rsGroupMap = Collections.unmodifiableMap(newRSGroupMap); - this.tableMap = Collections.unmodifiableMap(newTableMap); - } - - /** - * Update cache of rsgroups. Caller must be synchronized on 'this'. - * @param currentGroups Current list of Groups. - */ - private void updateCacheOfRSGroups(final Set currentGroups) { - this.prevRSGroups.clear(); - this.prevRSGroups.addAll(currentGroups); - } - - // Called by getDefaultServers. Presume it has lock in place. - private List getOnlineRS() throws IOException { - if (masterServices != null) { - return masterServices.getServerManager().getOnlineServersList(); - } - LOG.debug("Reading online RS from zookeeper"); - List servers = new LinkedList<>(); - try { - for (String el : ZKUtil.listChildrenNoWatch(watcher, watcher.getZNodePaths().rsZNode)) { - servers.add(ServerName.parseServerName(el)); - } - } catch (KeeperException e) { - throw new IOException("Failed to retrieve server list from zookeeper", e); - } - return servers; - } - - // Called by ServerEventsListenerThread. Presume it has lock on this manager when it runs. - private SortedSet
getDefaultServers() throws IOException { - // Build a list of servers in other groups than default group, from rsGroupMap - Set
serversInOtherGroup = new HashSet<>(); - for (RSGroupInfo group : listRSGroups() /* get from rsGroupMap */) { - if (!RSGroupInfo.DEFAULT_GROUP.equals(group.getName())) { // not default group - serversInOtherGroup.addAll(group.getServers()); - } - } - - // Get all online servers from Zookeeper and find out servers in default group - SortedSet
<Address> defaultServers = Sets.newTreeSet(); - for (ServerName serverName : getOnlineRS()) { - Address server = Address.fromParts(serverName.getHostname(), serverName.getPort()); - if (!serversInOtherGroup.contains(server)) { // not in other groups - defaultServers.add(server); - } - } - return defaultServers; - } - - // Called by ServerEventsListenerThread. Synchronize on this because redoing - // the rsGroupMap then writing it out. - private synchronized void updateDefaultServers(SortedSet<Address>
servers) throws IOException { - RSGroupInfo info = rsGroupMap.get(RSGroupInfo.DEFAULT_GROUP); - RSGroupInfo newInfo = new RSGroupInfo(info.getName(), servers, info.getTables()); - HashMap newGroupMap = Maps.newHashMap(rsGroupMap); - newGroupMap.put(newInfo.getName(), newInfo); - flushConfig(newGroupMap); - } - - /** - * Calls {@link RSGroupInfoManagerImpl#updateDefaultServers(SortedSet)} to update list of known - * servers. Notifications about server changes are received by registering {@link ServerListener}. - * As a listener, we need to return immediately, so the real work of updating the servers is done - * asynchronously in this thread. - */ - private class ServerEventsListenerThread extends Thread implements ServerListener { - private final Logger LOG = LoggerFactory.getLogger(ServerEventsListenerThread.class); - private boolean changed = false; - - ServerEventsListenerThread() { - setDaemon(true); - } - - @Override - public void serverAdded(ServerName serverName) { - serverChanged(); - } - - @Override - public void serverRemoved(ServerName serverName) { - serverChanged(); - } - - private synchronized void serverChanged() { - changed = true; - this.notify(); - } - - @Override - public void run() { - setName(ServerEventsListenerThread.class.getName() + "-" + masterServices.getServerName()); - SortedSet
<Address> prevDefaultServers = new TreeSet<>(); - while (isMasterRunning(masterServices)) { - try { - LOG.info("Updating default servers."); - SortedSet<Address>
servers = RSGroupInfoManagerImpl.this.getDefaultServers(); - if (!servers.equals(prevDefaultServers)) { - RSGroupInfoManagerImpl.this.updateDefaultServers(servers); - prevDefaultServers = servers; - LOG.info("Updated with servers: " + servers.size()); - } - try { - synchronized (this) { - while (!changed) { - wait(); - } - changed = false; - } - } catch (InterruptedException e) { - LOG.warn("Interrupted", e); - } - } catch (IOException e) { - LOG.warn("Failed to update default servers", e); - } - } - } - } - - private class RSGroupStartupWorker extends Thread { - private final Logger LOG = LoggerFactory.getLogger(RSGroupStartupWorker.class); - private volatile boolean online = false; - - RSGroupStartupWorker() { - super(RSGroupStartupWorker.class.getName() + "-" + masterServices.getServerName()); - setDaemon(true); - } - - @Override - public void run() { - if (waitForGroupTableOnline()) { - LOG.info("GroupBasedLoadBalancer is now online"); - } else { - LOG.warn("Quit without making region group table online"); - } - } - - private boolean waitForGroupTableOnline() { - while (isMasterRunning(masterServices)) { - try { - TableStateManager tsm = masterServices.getTableStateManager(); - if (!tsm.isTablePresent(RSGROUP_TABLE_NAME)) { - createRSGroupTable(); - } - // try reading from the table - try (Table table = conn.getTable(RSGROUP_TABLE_NAME)) { - table.get(new Get(ROW_KEY)); - } - LOG.info( - "RSGroup table=" + RSGROUP_TABLE_NAME + " is online, refreshing cached information"); - RSGroupInfoManagerImpl.this.refresh(true); - online = true; - // flush any inconsistencies between ZK and HTable - RSGroupInfoManagerImpl.this.flushConfig(); - return true; - } catch (Exception e) { - LOG.warn("Failed to perform check", e); - // 100ms is short so let's just ignore the interrupt - Threads.sleepWithoutInterrupt(100); - } - } - return false; - } - - private void createRSGroupTable() throws IOException { - OptionalLong optProcId = masterServices.getProcedures().stream() - 
.filter(p -> p instanceof CreateTableProcedure).map(p -> (CreateTableProcedure) p) - .filter(p -> p.getTableName().equals(RSGROUP_TABLE_NAME)).mapToLong(Procedure::getProcId) - .findFirst(); - long procId; - if (optProcId.isPresent()) { - procId = optProcId.getAsLong(); - } else { - procId = masterServices.createSystemTable(RSGROUP_TABLE_DESC); - } - // wait for region to be online - int tries = 600; - while (!(masterServices.getMasterProcedureExecutor().isFinished(procId)) && - masterServices.getMasterProcedureExecutor().isRunning() && tries > 0) { - try { - Thread.sleep(100); - } catch (InterruptedException e) { - throw new IOException("Wait interrupted ", e); - } - tries--; - } - if (tries <= 0) { - throw new IOException("Failed to create group table in a given time."); - } else { - Procedure result = masterServices.getMasterProcedureExecutor().getResult(procId); - if (result != null && result.isFailed()) { - throw new IOException( - "Failed to create group table. " + MasterProcedureUtil.unwrapRemoteIOException(result)); - } - } - } - - public boolean isOnline() { - return online; - } - } - - private static boolean isMasterRunning(MasterServices masterServices) { - return !masterServices.isAborted() && !masterServices.isStopped(); - } - - private void multiMutate(List mutations) throws IOException { - try (Table table = conn.getTable(RSGROUP_TABLE_NAME)) { - CoprocessorRpcChannel channel = table.coprocessorService(ROW_KEY); - MultiRowMutationProtos.MutateRowsRequest.Builder mmrBuilder = - MultiRowMutationProtos.MutateRowsRequest.newBuilder(); - for (Mutation mutation : mutations) { - if (mutation instanceof Put) { - mmrBuilder.addMutationRequest(org.apache.hadoop.hbase.protobuf.ProtobufUtil.toMutation( - org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType.PUT, - mutation)); - } else if (mutation instanceof Delete) { - mmrBuilder.addMutationRequest(org.apache.hadoop.hbase.protobuf.ProtobufUtil.toMutation( - 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType.DELETE, - mutation)); - } else { - throw new DoNotRetryIOException( - "multiMutate doesn't support " + mutation.getClass().getName()); - } - } - - MultiRowMutationProtos.MultiRowMutationService.BlockingInterface service = - MultiRowMutationProtos.MultiRowMutationService.newBlockingStub(channel); - try { - service.mutateRows(null, mmrBuilder.build()); - } catch (ServiceException ex) { - ProtobufUtil.toIOException(ex); - } - } - } - - private void checkGroupName(String groupName) throws ConstraintException { - if (!groupName.matches("[a-zA-Z0-9_]+")) { - throw new ConstraintException("RSGroup name should only contain alphanumeric characters"); - } - } -} diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupProtobufUtil.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupProtobufUtil.java deleted file mode 100644 index 56e35e76197c..000000000000 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupProtobufUtil.java +++ /dev/null @@ -1,63 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hbase.rsgroup; - -import java.util.ArrayList; -import java.util.List; - -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.net.Address; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; -import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos; -import org.apache.yetus.audience.InterfaceAudience; - -@InterfaceAudience.Private -final class RSGroupProtobufUtil { - private RSGroupProtobufUtil() { - } - - static RSGroupInfo toGroupInfo(RSGroupProtos.RSGroupInfo proto) { - RSGroupInfo RSGroupInfo = new RSGroupInfo(proto.getName()); - for(HBaseProtos.ServerName el: proto.getServersList()) { - RSGroupInfo.addServer(Address.fromParts(el.getHostName(), el.getPort())); - } - for(HBaseProtos.TableName pTableName: proto.getTablesList()) { - RSGroupInfo.addTable(ProtobufUtil.toTableName(pTableName)); - } - return RSGroupInfo; - } - - static RSGroupProtos.RSGroupInfo toProtoGroupInfo(RSGroupInfo pojo) { - List tables = new ArrayList<>(pojo.getTables().size()); - for(TableName arg: pojo.getTables()) { - tables.add(ProtobufUtil.toProtoTableName(arg)); - } - List hostports = new ArrayList<>(pojo.getServers().size()); - for(Address el: pojo.getServers()) { - hostports.add(HBaseProtos.ServerName.newBuilder() - .setHostName(el.getHostname()) - .setPort(el.getPort()) - .build()); - } - return RSGroupProtos.RSGroupInfo.newBuilder().setName(pojo.getName()) - .addAllServers(hostports) - .addAllTables(tables).build(); - } -} diff --git a/hbase-rsgroup/src/test/resources/log4j.properties b/hbase-rsgroup/src/test/resources/log4j.properties deleted file mode 100644 index c322699ced24..000000000000 --- a/hbase-rsgroup/src/test/resources/log4j.properties +++ /dev/null @@ -1,68 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. 
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Define some default values that can be overridden by system properties -hbase.root.logger=INFO,console -hbase.log.dir=. -hbase.log.file=hbase.log - -# Define the root logger to the system property "hbase.root.logger". -log4j.rootLogger=${hbase.root.logger} - -# Logging Threshold -log4j.threshold=ALL - -# -# Daily Rolling File Appender -# -log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender -log4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file} - -# Rollver at midnight -log4j.appender.DRFA.DatePattern=.yyyy-MM-dd - -# 30-day backup -#log4j.appender.DRFA.MaxBackupIndex=30 -log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout -# Debugging Pattern format -log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %C{2}(%L): %m%n - - -# -# console -# Add "console" to rootlogger above if you want to use this -# -log4j.appender.console=org.apache.log4j.ConsoleAppender -log4j.appender.console.target=System.err -log4j.appender.console.layout=org.apache.log4j.PatternLayout -log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %C{2}(%L): %m%n - -# Custom Logging levels - -#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG - -log4j.logger.org.apache.hadoop=WARN -log4j.logger.org.apache.zookeeper=ERROR -log4j.logger.org.apache.hadoop.hbase=DEBUG - 
-#These settings are workarounds against spurious logs from the minicluster. -#See HBASE-4709 -log4j.logger.org.apache.hadoop.metrics2.impl.MetricsConfig=WARN -log4j.logger.org.apache.hadoop.metrics2.impl.MetricsSinkAdapter=WARN -log4j.logger.org.apache.hadoop.metrics2.impl.MetricsSystemImpl=WARN -log4j.logger.org.apache.hadoop.metrics2.util.MBeans=WARN -# Enable this to get detailed connection error/retry logging. -# log4j.logger.org.apache.hadoop.hbase.client.ConnectionImplementation=TRACE diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java index 2a2e66675bc2..c22b0f952d7a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java @@ -1340,6 +1340,24 @@ default void preGetRSGroupInfoOfServer(final ObserverContext ctx, final Address server) throws IOException {} + /** + * Called before setting rsgroup for tables + * @param ctx the environment to interact with the framework and master + * @param tables tables to set group + * @param groupName group name + */ + default void preSetRSGroupForTables(final ObserverContext ctx, + final Set tables, final String groupName) throws IOException {} + + /** + * Called after setting rsgroup for tables + * @param ctx the environment to interact with the framework and master + * @param tables tables to set group + * @param groupName group name + */ + default void postSetRSGroupForTables(final ObserverContext ctx, + final Set tables, final String groupName) throws IOException {} + /** * Called before add a replication peer * @param ctx the environment to interact with the framework and master diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index bb2aadbf74db..cab3a64004ce 
100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -186,6 +186,7 @@ import org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner; import org.apache.hadoop.hbase.replication.master.ReplicationPeerConfigUpgrader; import org.apache.hadoop.hbase.replication.regionserver.ReplicationStatus; +import org.apache.hadoop.hbase.rsgroup.RSGroupInfoManager; import org.apache.hadoop.hbase.security.AccessDeniedException; import org.apache.hadoop.hbase.security.SecurityConstants; import org.apache.hadoop.hbase.security.UserProvider; @@ -350,6 +351,8 @@ public void run() { // manager of assignment nodes in zookeeper private AssignmentManager assignmentManager; + private RSGroupInfoManager rsGroupInfoManager; + // manager of replication private ReplicationPeerManager replicationPeerManager; @@ -772,6 +775,8 @@ protected void initializeZKBasedSystemTrackers() this.splitOrMergeTracker = new SplitOrMergeTracker(zooKeeper, conf, this); this.splitOrMergeTracker.start(); + this.rsGroupInfoManager = RSGroupInfoManager.create(this); + this.replicationPeerManager = ReplicationPeerManager.create(zooKeeper, conf); this.drainingServerTracker = new DrainingServerTracker(zooKeeper, this, this.serverManager); @@ -1963,7 +1968,7 @@ private void warmUpRegion(ServerName server, RegionInfo region) { // Replace with an async implementation from which you can get // a success/failure result. @VisibleForTesting - public void move(final byte[] encodedRegionName, byte[] destServerName) throws HBaseIOException { + public void move(final byte[] encodedRegionName, byte[] destServerName) throws IOException { RegionState regionState = assignmentManager.getRegionStates(). getRegionState(Bytes.toString(encodedRegionName)); @@ -3557,7 +3562,7 @@ public long transitReplicationPeerSyncReplicationState(String peerId, SyncReplic * @param servers Region servers to decommission. 
*/ public void decommissionRegionServers(final List servers, final boolean offload) - throws HBaseIOException { + throws IOException { List serversAdded = new ArrayList<>(servers.size()); // Place the decommission marker first. String parentZnode = getZooKeeper().getZNodePaths().drainingZNode; @@ -3751,4 +3756,9 @@ public Map getWalGroupsReplicationStatus() { public HbckChore getHbckChore() { return this.hbckChore; } + + @Override + public RSGroupInfoManager getRSRSGroupInfoManager() { + return rsGroupInfoManager; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java index 816636f8ae07..0fc544a6aec1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java @@ -19,12 +19,12 @@ package org.apache.hadoop.hbase.master; import edu.umd.cs.findbugs.annotations.Nullable; +import java.io.IOException; import java.util.List; import java.util.Map; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ClusterMetrics; -import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.TableName; @@ -65,95 +65,72 @@ public interface LoadBalancer extends Configurable, Stoppable, ConfigurationObse ServerName BOGUS_SERVER_NAME = ServerName.valueOf("localhost,1,1"); /** - * Set the current cluster status. This allows a LoadBalancer to map host name to a server - * @param st + * Set the current cluster status. This allows a LoadBalancer to map host name to a server */ void setClusterMetrics(ClusterMetrics st); /** * Pass RegionStates and allow balancer to set the current cluster load. - * @param ClusterLoad */ void setClusterLoad(Map>> ClusterLoad); /** * Set the master service. 
- * @param masterServices */ void setMasterServices(MasterServices masterServices); /** * Perform the major balance operation - * @param tableName - * @param clusterState * @return List of plans */ - List balanceCluster(TableName tableName, Map> clusterState) throws HBaseIOException; + List balanceCluster(TableName tableName, + Map> clusterState) throws IOException; /** * Perform the major balance operation - * @param clusterState * @return List of plans */ - List balanceCluster(Map> clusterState) throws HBaseIOException; + List balanceCluster(Map> clusterState) + throws IOException; /** * Perform a Round Robin assignment of regions. - * @param regions - * @param servers * @return Map of servername to regioninfos */ - Map> roundRobinAssignment( - List regions, - List servers - ) throws HBaseIOException; + Map> roundRobinAssignment(List regions, + List servers) throws IOException; /** * Assign regions to the previously hosting region server - * @param regions - * @param servers * @return List of plans */ @Nullable - Map> retainAssignment( - Map regions, - List servers - ) throws HBaseIOException; + Map> retainAssignment(Map regions, + List servers) throws IOException; /** * Get a random region server from the list * @param regionInfo Region for which this selection is being done. - * @param servers - * @return Servername */ - ServerName randomAssignment( - RegionInfo regionInfo, List servers - ) throws HBaseIOException; + ServerName randomAssignment(RegionInfo regionInfo, List servers) throws IOException; /** * Initialize the load balancer. Must be called after setters. - * @throws HBaseIOException */ - void initialize() throws HBaseIOException; + void initialize() throws IOException; /** * Marks the region as online at balancer. - * @param regionInfo - * @param sn */ void regionOnline(RegionInfo regionInfo, ServerName sn); /** * Marks the region as offline at balancer. 
- * @param regionInfo */ void regionOffline(RegionInfo regionInfo); - /* + /** * Notification that config has changed - * @param conf */ @Override void onConfigurationChange(Configuration conf); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java index 47ef3d08bb4d..6a203b5852e8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java @@ -1547,6 +1547,24 @@ public void call(MasterObserver observer) throws IOException { }); } + public void preSetRSGroupForTables(final Set tables, final String groupName) throws IOException { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { + @Override + public void call(MasterObserver observer) throws IOException { + observer.preSetRSGroupForTables(this, tables, groupName); + } + }); + } + + public void postSetRSGroupForTables(final Set tables, final String groupName) throws IOException { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { + @Override + public void call(MasterObserver observer) throws IOException { + observer.postSetRSGroupForTables(this, tables, groupName); + } + }); + } + public void preAddReplicationPeer(final String peerId, final ReplicationPeerConfig peerConfig) throws IOException { execOperation(coprocEnvironments.isEmpty() ? 
null : new MasterObserverOperation() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index c8707f62ac65..01579de91292 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -30,6 +30,7 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; +import java.util.Optional; import java.util.Set; import java.util.stream.Collectors; import org.apache.hadoop.conf.Configuration; @@ -39,6 +40,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerMetrics; import org.apache.hadoop.hbase.ServerMetricsBuilder; @@ -92,6 +94,7 @@ import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; +import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; import org.apache.hadoop.hbase.security.Superusers; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.access.AccessChecker; @@ -116,6 +119,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.collect.Sets; import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; @@ -333,6 +337,32 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.TransitReplicationPeerSyncReplicationStateResponse; import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.RSGroupAdminService; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.MoveServersRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.MoveServersResponse; +import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.SetRSGroupForTablesRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.SetRSGroupForTablesResponse; +import org.apache.hadoop.hbase.rsgroup.RSGroupUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; /** @@ -340,9 +370,10 @@ */ @InterfaceAudience.Private @SuppressWarnings("deprecation") -public class MasterRpcServices extends RSRpcServices - implements MasterService.BlockingInterface, RegionServerStatusService.BlockingInterface, - LockService.BlockingInterface, HbckService.BlockingInterface { +public class MasterRpcServices extends RSRpcServices implements MasterService.BlockingInterface, + RegionServerStatusService.BlockingInterface, + LockService.BlockingInterface, HbckService.BlockingInterface, + RSGroupAdminService.BlockingInterface{ private static final Logger LOG = LoggerFactory.getLogger(MasterRpcServices.class.getName()); private static final Logger AUDITLOG = LoggerFactory.getLogger("SecurityLogger."+MasterRpcServices.class.getName()); @@ -2776,4 +2807,270 @@ private boolean shouldSubmitSCP(ServerName serverName) { } return true; } + + + @Override + public GetRSGroupInfoResponse getRSGroupInfo(RpcController controller, + GetRSGroupInfoRequest request) throws ServiceException { + GetRSGroupInfoResponse.Builder builder = 
GetRSGroupInfoResponse.newBuilder(); + String groupName = request.getRSGroupName(); + LOG.info( + master.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, group=" + groupName); + try { + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().preGetRSGroupInfo(groupName); + } + RSGroupInfo rsGroupInfo = master.getRSRSGroupInfoManager().getRSGroup(groupName); + if (rsGroupInfo != null) { + builder.setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(fillTables(rsGroupInfo))); + } + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().postGetRSGroupInfo(groupName); + } + } catch (IOException e) { + throw new ServiceException(e); + } + return builder.build(); + } + + @Override + public GetRSGroupInfoOfServerResponse getRSGroupInfoOfServer(RpcController controller, + GetRSGroupInfoOfServerRequest request) throws ServiceException { + GetRSGroupInfoOfServerResponse.Builder builder = GetRSGroupInfoOfServerResponse.newBuilder(); + Address hp = Address.fromParts(request.getServer().getHostName(), + request.getServer().getPort()); + LOG.info(master.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, server=" + hp); + try { + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().preGetRSGroupInfoOfServer(hp); + } + RSGroupInfo info = master.getRSRSGroupInfoManager().getRSGroupOfServer(hp); + if (info != null) { + builder.setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(fillTables(info))); + } + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().postGetRSGroupInfoOfServer(hp); + } + } catch (IOException e) { + throw new ServiceException(e); + } + return builder.build(); + } + + private RSGroupInfo fillTables(RSGroupInfo rsGroupInfo) throws IOException { + return RSGroupUtil.fillTables(rsGroupInfo, master.getTableDescriptors().getAll().values()); + } + + @Override + public MoveServersResponse moveServers(RpcController controller, 
MoveServersRequest request) + throws ServiceException { + Set
hostPorts = Sets.newHashSet(); + MoveServersResponse.Builder builder = MoveServersResponse.newBuilder(); + for (HBaseProtos.ServerName el : request.getServersList()) { + hostPorts.add(Address.fromParts(el.getHostName(), el.getPort())); + } + LOG.info(master.getClientIdAuditPrefix() + " move servers " + hostPorts + " to rsgroup " + + request.getTargetGroup()); + try { + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().preMoveServers(hostPorts, request.getTargetGroup()); + } + master.getRSRSGroupInfoManager().moveServers(hostPorts, request.getTargetGroup()); + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().postMoveServers(hostPorts, request.getTargetGroup()); + } + } catch (IOException e) { + throw new ServiceException(e); + } + return builder.build(); + } + + @Deprecated + @Override + public MoveTablesResponse moveTables(RpcController controller, MoveTablesRequest request) + throws ServiceException { + return null; + } + + @Override + public AddRSGroupResponse addRSGroup(RpcController controller, AddRSGroupRequest request) + throws ServiceException { + AddRSGroupResponse.Builder builder = AddRSGroupResponse.newBuilder(); + LOG.info(master.getClientIdAuditPrefix() + " add rsgroup " + request.getRSGroupName()); + try { + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().preAddRSGroup(request.getRSGroupName()); + } + master.getRSRSGroupInfoManager().addRSGroup(new RSGroupInfo(request.getRSGroupName())); + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().postAddRSGroup(request.getRSGroupName()); + } + } catch (IOException e) { + throw new ServiceException(e); + } + return builder.build(); + } + + @Override + public RemoveRSGroupResponse removeRSGroup(RpcController controller, RemoveRSGroupRequest request) + throws ServiceException { + RemoveRSGroupResponse.Builder builder = RemoveRSGroupResponse.newBuilder(); + 
LOG.info(master.getClientIdAuditPrefix() + " remove rsgroup " + request.getRSGroupName()); + try { + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().preRemoveRSGroup(request.getRSGroupName()); + } + master.getRSRSGroupInfoManager().removeRSGroup(request.getRSGroupName()); + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().postRemoveRSGroup(request.getRSGroupName()); + } + } catch (IOException e) { + throw new ServiceException(e); + } + return builder.build(); + } + + @Override + public BalanceRSGroupResponse balanceRSGroup(RpcController controller, + BalanceRSGroupRequest request) throws ServiceException { + BalanceRSGroupResponse.Builder builder = BalanceRSGroupResponse.newBuilder(); + LOG.info( + master.getClientIdAuditPrefix() + " balance rsgroup, group=" + request.getRSGroupName()); + try { + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().preBalanceRSGroup(request.getRSGroupName()); + } + boolean balancerRan = + master.getRSRSGroupInfoManager().balanceRSGroup(request.getRSGroupName()); + builder.setBalanceRan(balancerRan); + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().postBalanceRSGroup(request.getRSGroupName(), balancerRan); + } + } catch (IOException e) { + throw new ServiceException(e); + } + return builder.build(); + } + + @Override + public ListRSGroupInfosResponse listRSGroupInfos(RpcController controller, + ListRSGroupInfosRequest request) throws ServiceException { + ListRSGroupInfosResponse.Builder builder = ListRSGroupInfosResponse.newBuilder(); + LOG.info(master.getClientIdAuditPrefix() + " list rsgroup"); + try { + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().preListRSGroups(); + } + List rsGroupInfos = master.getRSRSGroupInfoManager().listRSGroups().stream() + .map(RSGroupInfo::new).collect(Collectors.toList()); + Map name2Info = new HashMap<>(); + for 
(RSGroupInfo rsGroupInfo : rsGroupInfos) { + name2Info.put(rsGroupInfo.getName(), rsGroupInfo); + } + for (TableDescriptor td : master.getTableDescriptors().getAll().values()) { + String groupName = td.getRegionServerGroup().orElse(RSGroupInfo.DEFAULT_GROUP); + RSGroupInfo rsGroupInfo = name2Info.get(groupName); + if (rsGroupInfo != null) { + rsGroupInfo.addTable(td.getTableName()); + } + } + for (RSGroupInfo rsGroupInfo : rsGroupInfos) { + // TODO: this can be done at once outside this loop, do not need to scan all every time. + builder.addRSGroupInfo(ProtobufUtil.toProtoGroupInfo(rsGroupInfo)); + } + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().postListRSGroups(); + } + } catch (IOException e) { + throw new ServiceException(e); + } + return builder.build(); + } + + @Deprecated + @Override + public MoveServersAndTablesResponse moveServersAndTables(RpcController controller, + MoveServersAndTablesRequest request) throws ServiceException { + return null; + } + + @Override + public RemoveServersResponse removeServers(RpcController controller, RemoveServersRequest request) + throws ServiceException { + RemoveServersResponse.Builder builder = RemoveServersResponse.newBuilder(); + Set
servers = Sets.newHashSet(); + for (HBaseProtos.ServerName el : request.getServersList()) { + servers.add(Address.fromParts(el.getHostName(), el.getPort())); + } + LOG.info(master.getClientIdAuditPrefix() + + " remove decommissioned servers from rsgroup: " + servers); + try { + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().preRemoveServers(servers); + } + master.getRSRSGroupInfoManager().removeServers(servers); + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().postRemoveServers(servers); + } + } catch (IOException e) { + throw new ServiceException(e); + } + return builder.build(); + } + + @Override + public SetRSGroupForTablesResponse setRSGroupForTables(RpcController controller, SetRSGroupForTablesRequest request) + throws ServiceException { + SetRSGroupForTablesResponse.Builder builder = SetRSGroupForTablesResponse.newBuilder(); + Set tables = new HashSet<>(request.getTableNameList().size()); + for (HBaseProtos.TableName tableName : request.getTableNameList()) { + tables.add(ProtobufUtil.toTableName(tableName)); + } + LOG.info(master.getClientIdAuditPrefix() + " set tables " + tables + " rsgroup as " + + request.getTargetGroup()); + try { + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().preSetRSGroupForTables(tables, request.getTargetGroup()); + } + master.getRSRSGroupInfoManager().setRSGroup(tables, request.getTargetGroup()); + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().postSetRSGroupForTables(tables, request.getTargetGroup()); + } + } catch (IOException e) { + throw new ServiceException(e); + } + return builder.build(); + } + + @Override + public GetRSGroupInfoOfTableResponse getRSGroupInfoOfTable(RpcController controller, + GetRSGroupInfoOfTableRequest request) throws ServiceException{ + GetRSGroupInfoOfTableResponse.Builder builder = GetRSGroupInfoOfTableResponse.newBuilder(); + TableName tableName = 
ProtobufUtil.toTableName(request.getTableName()); + LOG.info( + master.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, table=" + tableName); + try { + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().preGetRSGroupInfoOfTable(tableName); + } + Optional optGroup = + RSGroupUtil.getRSGroupInfo(master, master.getRSRSGroupInfoManager(), tableName); + if (optGroup.isPresent()) { + builder.setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(fillTables(optGroup.get()))); + } else { + if (master.getTableStateManager().isTablePresent(tableName)) { + RSGroupInfo rsGroupInfo = + master.getRSRSGroupInfoManager().getRSGroup(RSGroupInfo.DEFAULT_GROUP); + builder.setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(fillTables(rsGroupInfo))); + } + } + + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().postGetRSGroupInfoOfTable(tableName); + } + } catch (IOException e) { + throw new ServiceException(e); + } + return builder.build(); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java index 41cec5cfb232..9c90368be07e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java @@ -18,10 +18,8 @@ package org.apache.hadoop.hbase.master; import com.google.protobuf.Service; - import java.io.IOException; import java.util.List; - import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableDescriptors; @@ -51,8 +49,10 @@ import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; import org.apache.hadoop.hbase.replication.SyncReplicationState; +import org.apache.hadoop.hbase.rsgroup.RSGroupInfoManager; import 
org.apache.hadoop.hbase.security.access.AccessChecker; import org.apache.hadoop.hbase.security.access.ZKPermissionWatcher; +import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; @@ -537,4 +537,16 @@ default SplitWALManager getSplitWALManager(){ */ List executeRegionPlansWithThrottling(List plans); + /** + * @return the {@link RSGroupInfoManager} + */ + RSGroupInfoManager getRSRSGroupInfoManager(); + + /** + * Queries the state of the {@link LoadBalancerTracker}. If the balancer is not initialized, + * false is returned. + * + * @return The state of the load balancer, or false if the load balancer isn't defined. + */ + boolean isBalancerOn(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java index a231facfb798..24ad0d9098e9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java @@ -683,7 +683,7 @@ public TransitRegionStateProcedure[] createRoundRobinAssignProcedures(List r } try { acceptPlan(regions, balancer.retainAssignment(retainMap, servers)); - } catch (HBaseIOException e) { + } catch (IOException e) { LOG.warn("unable to retain assignment", e); addToPendingAssignment(regions, retainMap.keySet()); } @@ -2001,7 +2001,7 @@ private void processAssignmentPlans(final HashMap r } try { acceptPlan(regions, balancer.roundRobinAssignment(hris, servers)); - } catch (HBaseIOException e) { + } catch (IOException e) { LOG.warn("unable to round-robin assignment", e); addToPendingAssignment(regions, hris); } diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdmin.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdmin.java similarity index 75% rename from hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdmin.java rename to hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdmin.java index 9ea996be1cc3..344d0b385366 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdmin.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdmin.java @@ -20,8 +20,6 @@ import java.io.IOException; import java.util.List; import java.util.Set; - -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.net.Address; import org.apache.yetus.audience.InterfaceAudience; @@ -35,22 +33,11 @@ public interface RSGroupAdmin { */ RSGroupInfo getRSGroupInfo(String groupName) throws IOException; - /** - * Gets {@code RSGroupInfo} for the given table's group. - */ - RSGroupInfo getRSGroupInfoOfTable(TableName tableName) throws IOException; - /** * Move given set of servers to the specified target RegionServer group. */ void moveServers(Set
servers, String targetGroup) throws IOException; - /** - * Move given set of tables to the specified target RegionServer group. - * This will unassign all of a table's region so it can be reassigned to the correct group. - */ - void moveTables(Set tables, String targetGroup) throws IOException; - /** * Creates a new RegionServer group with the given name. */ @@ -79,16 +66,6 @@ public interface RSGroupAdmin { */ RSGroupInfo getRSGroupOfServer(Address hostPort) throws IOException; - /** - * Move given set of servers and tables to the specified target RegionServer group. - * @param servers set of servers to move - * @param tables set of tables to move - * @param targetGroup the target group name - * @throws IOException if moving the server and tables fail - */ - void moveServersAndTables(Set
servers, Set tables, - String targetGroup) throws IOException; - /** * Remove decommissioned servers from rsgroup. * 1. Sometimes we may find the server aborted due to some hardware failure and we must offline diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java new file mode 100644 index 000000000000..70cbf279095c --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java @@ -0,0 +1,1061 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rsgroup; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.EnumSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.Future; +import java.util.regex.Pattern; +import com.google.protobuf.ServiceException; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.CacheEvictionStats; +import org.apache.hadoop.hbase.ClusterMetrics; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.NamespaceNotFoundException; +import org.apache.hadoop.hbase.RegionMetrics; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableExistsException; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.client.CompactType; +import org.apache.hadoop.hbase.client.CompactionState; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.SnapshotDescription; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.replication.TableCFs; +import org.apache.hadoop.hbase.client.security.SecurityCapability; +import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; +import org.apache.hadoop.hbase.net.Address; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse; 
+import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RSGroupAdminService; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.SetRSGroupForTablesRequest; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos; +import org.apache.hadoop.hbase.quotas.QuotaFilter; +import org.apache.hadoop.hbase.quotas.QuotaSettings; +import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotView; +import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException; +import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; +import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; +import org.apache.hadoop.hbase.replication.SyncReplicationState; +import org.apache.hadoop.hbase.security.access.GetUserPermissionsRequest; +import org.apache.hadoop.hbase.security.access.Permission; +import org.apache.hadoop.hbase.security.access.UserPermission; +import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException; +import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException; +import 
org.apache.hadoop.hbase.snapshot.SnapshotCreationException; +import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException; +import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hbase.thirdparty.com.google.common.collect.Sets; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * Client used for managing region server group information. + */ +@InterfaceAudience.Private +public class RSGroupAdminClient implements RSGroupAdmin, Admin { + private RSGroupAdminService.BlockingInterface stub; + private Admin admin; + + + public RSGroupAdminClient(Connection conn) throws IOException { + admin = conn.getAdmin(); + stub = RSGroupAdminService.newBlockingStub(admin.coprocessorService()); + } + // for writing UTs + @VisibleForTesting + protected RSGroupAdminClient() { + } + + @Override + public int getOperationTimeout() { + return 0; + } + + @Override + public int getSyncWaitTimeout() { + return 0; + } + + @Override + public void abort(String why, Throwable e) { + + } + + @Override + public boolean isAborted() { + return false; + } + + @Override + public Connection getConnection() { + return null; + } + + @Override + public boolean tableExists(TableName tableName) throws IOException { + return false; + } + + @Override + public List listTableDescriptors() throws IOException { + return null; + } + + @Override + public List listTableDescriptors(Pattern pattern) throws IOException { + return null; + } + + @Override + public List listTableDescriptors(Pattern pattern, boolean includeSysTables) + throws IOException { + return null; + } + + @Override + public TableName[] listTableNames() throws IOException { + return new TableName[0]; + } + + @Override + public TableName[] listTableNames(Pattern pattern, boolean includeSysTables) throws IOException { + return new TableName[0]; + } + + @Override + public TableDescriptor getDescriptor(TableName tableName) + throws TableNotFoundException, IOException { + return null; + } + + 
@Override + public void createTable(TableDescriptor desc, byte[] startKey, byte[] endKey, int numRegions) + throws IOException { + + } + + @Override + public Future createTableAsync(TableDescriptor desc) throws IOException { + return null; + } + + @Override + public Future createTableAsync(TableDescriptor desc, byte[][] splitKeys) + throws IOException { + return null; + } + + @Override + public Future deleteTableAsync(TableName tableName) throws IOException { + return null; + } + + @Override + public Future truncateTableAsync(TableName tableName, boolean preserveSplits) + throws IOException { + return null; + } + + @Override + public Future enableTableAsync(TableName tableName) throws IOException { + return null; + } + + @Override + public Future disableTableAsync(TableName tableName) throws IOException { + return null; + } + + @Override + public boolean isTableEnabled(TableName tableName) throws IOException { + return false; + } + + @Override + public boolean isTableDisabled(TableName tableName) throws IOException { + return false; + } + + @Override + public boolean isTableAvailable(TableName tableName) throws IOException { + return false; + } + + @Override + public Future addColumnFamilyAsync(TableName tableName, ColumnFamilyDescriptor columnFamily) + throws IOException { + return null; + } + + @Override + public Future deleteColumnFamilyAsync(TableName tableName, byte[] columnFamily) + throws IOException { + return null; + } + + @Override + public Future modifyColumnFamilyAsync(TableName tableName, + ColumnFamilyDescriptor columnFamily) throws IOException { + return null; + } + + @Override + public List getRegions(ServerName serverName) throws IOException { + return null; + } + + @Override + public void flush(TableName tableName) throws IOException { + + } + + @Override + public void flushRegion(byte[] regionName) throws IOException { + + } + + @Override + public void flushRegionServer(ServerName serverName) throws IOException { + + } + + @Override + public void 
compact(TableName tableName) throws IOException { + + } + + @Override + public void compactRegion(byte[] regionName) throws IOException { + + } + + @Override + public void compact(TableName tableName, byte[] columnFamily) throws IOException { + + } + + @Override + public void compactRegion(byte[] regionName, byte[] columnFamily) throws IOException { + + } + + @Override + public void compact(TableName tableName, CompactType compactType) + throws IOException, InterruptedException { + + } + + @Override + public void compact(TableName tableName, byte[] columnFamily, CompactType compactType) + throws IOException, InterruptedException { + + } + + @Override + public void majorCompact(TableName tableName) throws IOException { + + } + + @Override + public void majorCompactRegion(byte[] regionName) throws IOException { + + } + + @Override + public void majorCompact(TableName tableName, byte[] columnFamily) throws IOException { + + } + + @Override + public void majorCompactRegion(byte[] regionName, byte[] columnFamily) throws IOException { + + } + + @Override + public void majorCompact(TableName tableName, CompactType compactType) + throws IOException, InterruptedException { + + } + + @Override + public void majorCompact(TableName tableName, byte[] columnFamily, CompactType compactType) + throws IOException, InterruptedException { + + } + + @Override + public Map compactionSwitch(boolean switchState, + List serverNamesList) throws IOException { + return null; + } + + @Override + public void compactRegionServer(ServerName serverName) throws IOException { + + } + + @Override + public void majorCompactRegionServer(ServerName serverName) throws IOException { + + } + + @Override + public void move(byte[] encodedRegionName) throws IOException { + + } + + @Override + public void move(byte[] encodedRegionName, ServerName destServerName) throws IOException { + + } + + @Override + public void assign(byte[] regionName) throws IOException { + + } + + @Override + public void 
unassign(byte[] regionName, boolean force) throws IOException { + + } + + @Override + public void offline(byte[] regionName) throws IOException { + + } + + @Override + public boolean balancerSwitch(boolean onOrOff, boolean synchronous) throws IOException { + return false; + } + + @Override + public boolean balance() throws IOException { + return false; + } + + @Override + public boolean balance(boolean force) throws IOException { + return false; + } + + @Override + public boolean isBalancerEnabled() throws IOException { + return false; + } + + @Override + public CacheEvictionStats clearBlockCache(TableName tableName) throws IOException { + return null; + } + + @Override + public boolean normalize() throws IOException { + return false; + } + + @Override + public boolean isNormalizerEnabled() throws IOException { + return false; + } + + @Override + public boolean normalizerSwitch(boolean on) throws IOException { + return false; + } + + @Override + public boolean catalogJanitorSwitch(boolean onOrOff) throws IOException { + return false; + } + + @Override + public int runCatalogJanitor() throws IOException { + return 0; + } + + @Override + public boolean isCatalogJanitorEnabled() throws IOException { + return false; + } + + @Override + public boolean cleanerChoreSwitch(boolean onOrOff) throws IOException { + return false; + } + + @Override + public boolean runCleanerChore() throws IOException { + return false; + } + + @Override + public boolean isCleanerChoreEnabled() throws IOException { + return false; + } + + @Override + public Future mergeRegionsAsync(byte[][] nameofRegionsToMerge, boolean forcible) + throws IOException { + return null; + } + + @Override + public void split(TableName tableName) throws IOException { + + } + + @Override + public void split(TableName tableName, byte[] splitPoint) throws IOException { + + } + + @Override + public Future splitRegionAsync(byte[] regionName) throws IOException { + return null; + } + + @Override + public Future 
splitRegionAsync(byte[] regionName, byte[] splitPoint) throws IOException { + return null; + } + + @Override + public Future modifyTableAsync(TableDescriptor td) throws IOException { + return null; + } + + @Override + public void shutdown() throws IOException { + + } + + @Override + public void stopMaster() throws IOException { + + } + + @Override + public boolean isMasterInMaintenanceMode() throws IOException { + return false; + } + + @Override + public void stopRegionServer(String hostnamePort) throws IOException { + + } + + @Override + public ClusterMetrics getClusterMetrics(EnumSet options) + throws IOException { + return null; + } + + @Override + public List getRegionMetrics(ServerName serverName) throws IOException { + return null; + } + + @Override + public List getRegionMetrics(ServerName serverName, TableName tableName) + throws IOException { + return null; + } + + @Override + public Configuration getConfiguration() { + return null; + } + + @Override + public Future createNamespaceAsync(NamespaceDescriptor descriptor) throws IOException { + return null; + } + + @Override + public Future modifyNamespaceAsync(NamespaceDescriptor descriptor) throws IOException { + return null; + } + + @Override + public Future deleteNamespaceAsync(String name) throws IOException { + return null; + } + + @Override + public NamespaceDescriptor getNamespaceDescriptor(String name) + throws NamespaceNotFoundException, IOException { + return null; + } + + @Override + public String[] listNamespaces() throws IOException { + return new String[0]; + } + + @Override + public NamespaceDescriptor[] listNamespaceDescriptors() throws IOException { + return new NamespaceDescriptor[0]; + } + + @Override + public List listTableDescriptorsByNamespace(byte[] name) throws IOException { + return null; + } + + @Override + public TableName[] listTableNamesByNamespace(String name) throws IOException { + return new TableName[0]; + } + + @Override + public List getRegions(TableName tableName) throws 
IOException { + return null; + } + + @Override + public void close() { + + } + + @Override + public List listTableDescriptors(List tableNames) throws IOException { + return null; + } + + @Override + public Future abortProcedureAsync(long procId, boolean mayInterruptIfRunning) + throws IOException { + return null; + } + + @Override + public String getProcedures() throws IOException { + return null; + } + + @Override + public String getLocks() throws IOException { + return null; + } + + @Override + public void rollWALWriter(ServerName serverName) throws IOException, FailedLogCloseException { + + } + + @Override + public CompactionState getCompactionState(TableName tableName) throws IOException { + return null; + } + + @Override + public CompactionState getCompactionState(TableName tableName, CompactType compactType) + throws IOException { + return null; + } + + @Override + public CompactionState getCompactionStateForRegion(byte[] regionName) throws IOException { + return null; + } + + @Override + public long getLastMajorCompactionTimestamp(TableName tableName) throws IOException { + return 0; + } + + @Override + public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws IOException { + return 0; + } + + @Override + public void snapshot(SnapshotDescription snapshot) + throws IOException, SnapshotCreationException, IllegalArgumentException { + + } + + @Override + public Future snapshotAsync(SnapshotDescription snapshot) + throws IOException, SnapshotCreationException { + return null; + } + + @Override + public boolean isSnapshotFinished(SnapshotDescription snapshot) + throws IOException, HBaseSnapshotException, UnknownSnapshotException { + return false; + } + + @Override + public void restoreSnapshot(String snapshotName) throws IOException, RestoreSnapshotException { + + } + + @Override + public void restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot, boolean restoreAcl) + throws IOException, RestoreSnapshotException { + + } + + 
@Override + public Future cloneSnapshotAsync(String snapshotName, TableName tableName, + boolean restoreAcl) throws IOException, TableExistsException, RestoreSnapshotException { + return null; + } + + @Override + public void execProcedure(String signature, String instance, Map props) + throws IOException { + + } + + @Override + public byte[] execProcedureWithReturn(String signature, String instance, + Map props) throws IOException { + return new byte[0]; + } + + @Override + public boolean isProcedureFinished(String signature, String instance, Map props) + throws IOException { + return false; + } + + @Override + public List listSnapshots() throws IOException { + return null; + } + + @Override + public List listSnapshots(Pattern pattern) throws IOException { + return null; + } + + @Override + public List listTableSnapshots(Pattern tableNamePattern, + Pattern snapshotNamePattern) throws IOException { + return null; + } + + @Override + public void deleteSnapshot(String snapshotName) throws IOException { + + } + + @Override + public void deleteSnapshots(Pattern pattern) throws IOException { + + } + + @Override + public void deleteTableSnapshots(Pattern tableNamePattern, Pattern snapshotNamePattern) + throws IOException { + + } + + @Override + public void setQuota(QuotaSettings quota) throws IOException { + + } + + @Override + public List getQuota(QuotaFilter filter) throws IOException { + return null; + } + + @Override + public CoprocessorRpcChannel coprocessorService() { + return null; + } + + @Override + public CoprocessorRpcChannel coprocessorService(ServerName serverName) { + return null; + } + + @Override + public void updateConfiguration(ServerName server) throws IOException { + + } + + @Override + public void updateConfiguration() throws IOException { + + } + + @Override + public List getSecurityCapabilities() throws IOException { + return null; + } + + @Override + public boolean splitSwitch(boolean enabled, boolean synchronous) throws IOException { + return 
false; + } + + @Override + public boolean mergeSwitch(boolean enabled, boolean synchronous) throws IOException { + return false; + } + + @Override + public boolean isSplitEnabled() throws IOException { + return false; + } + + @Override + public boolean isMergeEnabled() throws IOException { + return false; + } + + @Override + public Future addReplicationPeerAsync(String peerId, ReplicationPeerConfig peerConfig, + boolean enabled) throws IOException { + return null; + } + + @Override + public Future removeReplicationPeerAsync(String peerId) throws IOException { + return null; + } + + @Override + public Future enableReplicationPeerAsync(String peerId) throws IOException { + return null; + } + + @Override + public Future disableReplicationPeerAsync(String peerId) throws IOException { + return null; + } + + @Override + public ReplicationPeerConfig getReplicationPeerConfig(String peerId) throws IOException { + return null; + } + + @Override + public Future updateReplicationPeerConfigAsync(String peerId, + ReplicationPeerConfig peerConfig) throws IOException { + return null; + } + + @Override + public List listReplicationPeers() throws IOException { + return null; + } + + @Override + public List listReplicationPeers(Pattern pattern) throws IOException { + return null; + } + + @Override + public Future transitReplicationPeerSyncReplicationStateAsync(String peerId, + SyncReplicationState state) throws IOException { + return null; + } + + @Override + public void decommissionRegionServers(List servers, boolean offload) + throws IOException { + + } + + @Override + public List listDecommissionedRegionServers() throws IOException { + return null; + } + + @Override + public void recommissionRegionServer(ServerName server, List encodedRegionNames) + throws IOException { + + } + + @Override + public List listReplicatedTableCFs() throws IOException { + return null; + } + + @Override + public void enableTableReplication(TableName tableName) throws IOException { + + } + + @Override + 
public void disableTableReplication(TableName tableName) throws IOException { + + } + + @Override + public void clearCompactionQueues(ServerName serverName, Set queues) + throws IOException, InterruptedException { + + } + + @Override + public List clearDeadServers(List servers) throws IOException { + return null; + } + + @Override + public void cloneTableSchema(TableName tableName, TableName newTableName, boolean preserveSplits) + throws IOException { + + } + + @Override + public boolean switchRpcThrottle(boolean enable) throws IOException { + return false; + } + + @Override + public boolean isRpcThrottleEnabled() throws IOException { + return false; + } + + @Override + public boolean exceedThrottleQuotaSwitch(boolean enable) throws IOException { + return false; + } + + @Override + public Map getSpaceQuotaTableSizes() throws IOException { + return null; + } + + @Override + public Map getRegionServerSpaceQuotaSnapshots( + ServerName serverName) throws IOException { + return null; + } + + @Override + public SpaceQuotaSnapshotView getCurrentSpaceQuotaSnapshot(String namespace) throws IOException { + return null; + } + + @Override + public SpaceQuotaSnapshotView getCurrentSpaceQuotaSnapshot(TableName tableName) + throws IOException { + return null; + } + + @Override + public void grant(UserPermission userPermission, boolean mergeExistingPermissions) + throws IOException { + + } + + @Override + public void revoke(UserPermission userPermission) throws IOException { + + } + + @Override + public List getUserPermissions( + GetUserPermissionsRequest getUserPermissionsRequest) throws IOException { + return null; + } + + @Override + public List hasUserPermissions(String userName, List permissions) + throws IOException { + return null; + } + + @Override + public RSGroupInfo getRSGroupInfo(String groupName) throws IOException { + try { + GetRSGroupInfoResponse resp = stub.getRSGroupInfo(null, + GetRSGroupInfoRequest.newBuilder().setRSGroupName(groupName).build()); + if 
(resp.hasRSGroupInfo()) { + return ProtobufUtil.toGroupInfo(resp.getRSGroupInfo()); + } + return null; + } catch (ServiceException e) { + throw ProtobufUtil.handleRemoteException(e); + } + } + + public RSGroupInfo getRSGroupInfoOfTable(TableName tableName) throws IOException { + GetRSGroupInfoOfTableRequest request = GetRSGroupInfoOfTableRequest.newBuilder().setTableName( + ProtobufUtil.toProtoTableName(tableName)).build(); + try { + GetRSGroupInfoOfTableResponse resp = stub.getRSGroupInfoOfTable(null, request); + if (resp.hasRSGroupInfo()) { + return ProtobufUtil.toGroupInfo(resp.getRSGroupInfo()); + } + return null; + } catch (ServiceException e) { + throw ProtobufUtil.handleRemoteException(e); + } + } + + @Override + public void setRSGroupForTables(Set tables, String groupName) throws IOException { + SetRSGroupForTablesRequest.Builder builder = + SetRSGroupForTablesRequest.newBuilder().setTargetGroup(groupName); + for(TableName tableName: tables) { + builder.addTableName(ProtobufUtil.toProtoTableName(tableName)); + if (!admin.tableExists(tableName)) { + throw new TableNotFoundException(tableName); + } + } + try { + stub.setRSGroupForTables(null, builder.build()); + } catch (ServiceException e) { + throw ProtobufUtil.handleRemoteException(e); + } + } + + @Override + public void moveServers(Set
servers, String targetGroup) throws IOException { + Set hostPorts = Sets.newHashSet(); + for(Address el: servers) { + hostPorts.add(HBaseProtos.ServerName.newBuilder() + .setHostName(el.getHostname()) + .setPort(el.getPort()) + .build()); + } + MoveServersRequest request = MoveServersRequest.newBuilder() + .setTargetGroup(targetGroup) + .addAllServers(hostPorts) + .build(); + try { + stub.moveServers(null, request); + } catch (ServiceException e) { + throw ProtobufUtil.handleRemoteException(e); + } + } + + @Override + public void addRSGroup(String groupName) throws IOException { + AddRSGroupRequest request = AddRSGroupRequest.newBuilder().setRSGroupName(groupName).build(); + try { + stub.addRSGroup(null, request); + } catch (ServiceException e) { + throw ProtobufUtil.handleRemoteException(e); + } + } + + @Override + public void removeRSGroup(String name) throws IOException { + RemoveRSGroupRequest request = RemoveRSGroupRequest.newBuilder().setRSGroupName(name).build(); + try { + stub.removeRSGroup(null, request); + } catch (ServiceException e) { + throw ProtobufUtil.handleRemoteException(e); + } + } + + @Override + public boolean balanceRSGroup(String groupName) throws IOException { + BalanceRSGroupRequest request = BalanceRSGroupRequest.newBuilder() + .setRSGroupName(groupName).build(); + try { + return stub.balanceRSGroup(null, request).getBalanceRan(); + } catch (ServiceException e) { + throw ProtobufUtil.handleRemoteException(e); + } + } + + @Override + public List listRSGroups() throws IOException { + try { + List resp = stub.listRSGroupInfos(null, + ListRSGroupInfosRequest.getDefaultInstance()).getRSGroupInfoList(); + List result = new ArrayList<>(resp.size()); + for(RSGroupProtos.RSGroupInfo entry : resp) { + result.add(ProtobufUtil.toGroupInfo(entry)); + } + return result; + } catch (ServiceException e) { + throw ProtobufUtil.handleRemoteException(e); + } + } + + @Override + public RSGroupInfo getRSGroupOfServer(Address hostPort) throws IOException { + 
GetRSGroupInfoOfServerRequest request = GetRSGroupInfoOfServerRequest.newBuilder() + .setServer(HBaseProtos.ServerName.newBuilder() + .setHostName(hostPort.getHostname()) + .setPort(hostPort.getPort()) + .build()) + .build(); + try { + GetRSGroupInfoOfServerResponse resp = stub.getRSGroupInfoOfServer(null, request); + if (resp.hasRSGroupInfo()) { + return ProtobufUtil.toGroupInfo(resp.getRSGroupInfo()); + } + return null; + } catch (ServiceException e) { + throw ProtobufUtil.handleRemoteException(e); + } + } + + public void moveServersAndTables(Set
servers, Set tables, String targetGroup) + throws IOException { + setRSGroupForTables(tables, targetGroup); + moveServers(servers, targetGroup); + } + + @Override + public void removeServers(Set
servers) throws IOException { + Set hostPorts = Sets.newHashSet(); + for(Address el: servers) { + hostPorts.add(HBaseProtos.ServerName.newBuilder() + .setHostName(el.getHostname()) + .setPort(el.getPort()) + .build()); + } + RemoveServersRequest request = RemoveServersRequest.newBuilder() + .addAllServers(hostPorts) + .build(); + try { + stub.removeServers(null, request); + } catch (ServiceException e) { + throw ProtobufUtil.handleRemoteException(e); + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java new file mode 100644 index 000000000000..b4dc66ace807 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java @@ -0,0 +1,328 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rsgroup; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; +import java.util.function.Supplier; +import java.util.stream.Collectors; +import com.google.protobuf.Service; +import org.apache.hadoop.hbase.CoprocessorEnvironment; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.constraint.ConstraintException; +import org.apache.hadoop.hbase.coprocessor.CoreCoprocessor; +import org.apache.hadoop.hbase.coprocessor.HasMasterServices; +import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor; +import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; +import org.apache.hadoop.hbase.coprocessor.MasterObserver; +import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.ipc.RpcServer; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.net.Address; +import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.security.UserProvider; +import org.apache.hadoop.hbase.security.access.AccessChecker; +import org.apache.hadoop.hbase.security.access.Permission; +import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.yetus.audience.InterfaceAudience; + +// TODO: Encapsulate MasterObserver functions into separate subclass. +@CoreCoprocessor +@InterfaceAudience.Private +public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver { + // Only instance of RSGroupInfoManager. 
 RSGroup aware load balancers ask for this instance on + // their setup. + private MasterServices master; + private RSGroupAdminServiceImpl groupAdminService = new RSGroupAdminServiceImpl(); + private AccessChecker accessChecker; + private UserProvider userProvider; + + @Override + public void start(CoprocessorEnvironment env) throws IOException { + if (!(env instanceof HasMasterServices)) { + throw new IOException("Does not implement HasMasterServices"); + } + + master = ((HasMasterServices) env).getMasterServices(); + Class clazz = + master.getConfiguration().getClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, null); + if (!RSGroupableBalancer.class.isAssignableFrom(clazz)) { + throw new IOException("Configured balancer does not support RegionServer groups."); + } + accessChecker = master.getAccessChecker(); + userProvider = UserProvider.instantiate(env.getConfiguration()); + groupAdminService.initialize(master); + } + + /** + * Returns the active user to which authorization checks should be applied. If we are in the + * context of an RPC call, the remote user is used, otherwise the currently logged in user is + * used.
+ */ + private User getActiveUser() throws IOException { + // for non-rpc handling, fallback to system user + Optional optionalUser = RpcServer.getRequestUser(); + if (optionalUser.isPresent()) { + return optionalUser.get(); + } + return userProvider.getCurrent(); + } + + @Override + public void stop(CoprocessorEnvironment env) { + } + + @Override + public Optional getMasterObserver() { + return Optional.of(this); + } + + @Override + public Iterable getServices() { + return Collections.singleton(groupAdminService); + } + + RSGroupInfoManager getGroupInfoManager() { + return master.getRSRSGroupInfoManager(); + } + + ///////////////////////////////////////////////////////////////////////////// + // MasterObserver overrides + ///////////////////////////////////////////////////////////////////////////// + + @Override + public void postClearDeadServers(ObserverContext ctx, + List servers, List notClearedServers) throws IOException { + Set
clearedServer = + servers.stream().filter(server -> !notClearedServers.contains(server)) + .map(ServerName::getAddress).collect(Collectors.toSet()); + if (!clearedServer.isEmpty()) { + master.getRSRSGroupInfoManager().removeServers(clearedServer); + } + } + + private RSGroupInfo checkGroupExists(Optional optGroupName, Supplier forWhom) + throws IOException { + if (optGroupName.isPresent()) { + String groupName = optGroupName.get(); + RSGroupInfo group = master.getRSRSGroupInfoManager().getRSGroup(groupName); + if (group == null) { + throw new ConstraintException( + "Region server group " + groupName + " for " + forWhom.get() + " does not exist"); + } + return group; + } + return null; + } + + private Optional getNamespaceGroup(NamespaceDescriptor namespaceDesc) { + return Optional + .ofNullable(namespaceDesc.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP)); + } + + // Do not allow creating new tables/namespaces which have an empty rs group, except the default rs + // group. Notice that we do not check for online servers, as this is not stable because region + // servers can die at any time. + private void checkGroupNotEmpty(RSGroupInfo rsGroupInfo, Supplier forWhom) + throws ConstraintException { + if (rsGroupInfo == null || rsGroupInfo.getName().equals(RSGroupInfo.DEFAULT_GROUP)) { + // we do not have a rs group config or we explicitly set the rs group to default, then no need + // to check. + return; + } + if (rsGroupInfo.getServers().isEmpty()) { + throw new ConstraintException( + "No servers in the rsgroup " + rsGroupInfo.getName() + " for " + forWhom.get()); + } + } + + @Override + public void preCreateTableAction(ObserverContext ctx, + TableDescriptor desc, RegionInfo[] regions) throws IOException { + if (desc.getTableName().isSystemTable()) { + // do not check for system tables as we may block the bootstrap. 
+ return; + } + Supplier forWhom = () -> "table " + desc.getTableName(); + RSGroupInfo rsGroupInfo = checkGroupExists(desc.getRegionServerGroup(), forWhom); + if (rsGroupInfo == null) { + // we do not set rs group info on table, check if we have one on namespace + String namespace = desc.getTableName().getNamespaceAsString(); + NamespaceDescriptor nd = master.getClusterSchema().getNamespace(namespace); + forWhom = () -> "table " + desc.getTableName() + "(inherit from namespace)"; + rsGroupInfo = checkGroupExists(getNamespaceGroup(nd), forWhom); + } + checkGroupNotEmpty(rsGroupInfo, forWhom); + } + + @Override + public TableDescriptor preModifyTable(ObserverContext ctx, + TableName tableName, TableDescriptor currentDescriptor, TableDescriptor newDescriptor) + throws IOException { + if (!currentDescriptor.getRegionServerGroup().equals(newDescriptor.getRegionServerGroup())) { + Supplier forWhom = () -> "table " + newDescriptor.getTableName(); + RSGroupInfo rsGroupInfo = checkGroupExists(newDescriptor.getRegionServerGroup(), forWhom); + checkGroupNotEmpty(rsGroupInfo, forWhom); + } + return MasterObserver.super.preModifyTable(ctx, tableName, currentDescriptor, newDescriptor); + } + + private void checkNamespaceGroup(NamespaceDescriptor nd) throws IOException { + Supplier forWhom = () -> "namespace " + nd.getName(); + RSGroupInfo rsGroupInfo = checkGroupExists(getNamespaceGroup(nd), forWhom); + checkGroupNotEmpty(rsGroupInfo, forWhom); + } + + @Override + public void preCreateNamespace(ObserverContext ctx, + NamespaceDescriptor ns) throws IOException { + checkNamespaceGroup(ns); + } + + @Override + public void preModifyNamespace(ObserverContext ctx, + NamespaceDescriptor currentNsDescriptor, NamespaceDescriptor newNsDescriptor) + throws IOException { + if (!Objects.equals( + currentNsDescriptor.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP), + newNsDescriptor.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP))) { + 
checkNamespaceGroup(newNsDescriptor); + } + } + + @Override + public void preMoveServersAndTables(ObserverContext ctx, + Set
 servers, Set tables, String targetGroup) throws IOException { + accessChecker.requirePermission(getActiveUser(), "moveServersAndTables", + null, Permission.Action.ADMIN); + try (Admin admin = ctx.getEnvironment().getConnection().getAdmin()) { + for (TableName tableName : tables) { + // Reject the whole request if any of the tables does not exist + if (!admin.tableExists(tableName)) { + throw new TableNotFoundException(tableName); + } + } + } + } + + @Override + public void preMoveServers(final ObserverContext ctx, + Set
servers, String targetGroup) throws IOException { + accessChecker.requirePermission(getActiveUser(), "moveServers", + null, Permission.Action.ADMIN); + } + + @Override + public void preMoveTables(ObserverContext ctx, + Set tables, String targetGroup) throws IOException { + accessChecker.requirePermission(getActiveUser(), "moveTables", + null, Permission.Action.ADMIN); + try (Admin admin = ctx.getEnvironment().getConnection().getAdmin()) { + for (TableName tableName : tables) { + // Skip checks for a table that does not exist + if (!admin.tableExists(tableName)) { + throw new TableNotFoundException(tableName); + } + } + } + } + + @Override + public void preAddRSGroup(ObserverContext ctx, + String name) throws IOException { + accessChecker.requirePermission(getActiveUser(), "addRSGroup", + null, Permission.Action.ADMIN); + } + + @Override + public void preRemoveRSGroup(ObserverContext ctx, + String name) throws IOException { + accessChecker.requirePermission(getActiveUser(), "removeRSGroup", + null, Permission.Action.ADMIN); + } + + @Override + public void preBalanceRSGroup(ObserverContext ctx, + String groupName) throws IOException { + accessChecker.requirePermission(getActiveUser(), "balanceRSGroup", + null, Permission.Action.ADMIN); + } + + @Override + public void preRemoveServers( + ObserverContext ctx, + Set
servers) throws IOException { + accessChecker.requirePermission(getActiveUser(), "removeServers", + null, Permission.Action.ADMIN); + } + + @Override + public void preGetRSGroupInfo(ObserverContext ctx, + String groupName) throws IOException { + accessChecker.requirePermission(getActiveUser(), "getRSGroupInfo", + null, Permission.Action.ADMIN); + } + + @Override + public void preGetRSGroupInfoOfTable(ObserverContext ctx, + TableName tableName) throws IOException { + accessChecker.requirePermission(getActiveUser(), "getRSGroupInfoOfTable", + null, Permission.Action.ADMIN); + //todo: should add check for table existence + } + + @Override + public void preListRSGroups(ObserverContext ctx) + throws IOException { + accessChecker.requirePermission(getActiveUser(), "listRSGroups", + null, Permission.Action.ADMIN); + } + + @Override + public void preGetRSGroupInfoOfServer(ObserverContext ctx, + Address server) throws IOException { + accessChecker.requirePermission(getActiveUser(), "getRSGroupInfoOfServer", + null, Permission.Action.ADMIN); + } + + @Override + public void preSetRSGroupForTables(ObserverContext ctx, + Set tables, String groupName) throws IOException { + accessChecker.requirePermission(getActiveUser(), "setRSGroupForTables", + null, Permission.Action.ADMIN); + try (Admin admin = ctx.getEnvironment().getConnection().getAdmin()) { + for (TableName tableName : tables) { + // Skip checks for a table that does not exist + if (!admin.tableExists(tableName)) { + throw new TableNotFoundException(tableName); + } + } + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServiceImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServiceImpl.java new file mode 100644 index 000000000000..86b6100803f9 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServiceImpl.java @@ -0,0 +1,418 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or 
more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rsgroup; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.stream.Collectors; +import com.google.protobuf.RpcCallback; +import com.google.protobuf.RpcController; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait; +import org.apache.hadoop.hbase.net.Address; +import org.apache.hadoop.hbase.procedure2.Procedure; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse; +import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.SetRSGroupForTablesRequest; +import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.SetRSGroupForTablesResponse; +import org.apache.hbase.thirdparty.com.google.common.collect.Sets; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Implementation of RSGroupAdminService defined in RSGroupAdmin.proto. This class calls + * {@link RSGroupInfoManagerImpl} for actual work, converts result to protocol buffer response, + * handles exceptions if any occurred and then calls the {@code RpcCallback} with the response. + */ +class RSGroupAdminServiceImpl extends RSGroupAdminProtos.RSGroupAdminService { + + private static final Logger LOG = LoggerFactory.getLogger(RSGroupAdminServiceImpl.class); + + private MasterServices master; + + private RSGroupInfoManager rsGroupInfoManager; + + RSGroupAdminServiceImpl() { + } + + void initialize(MasterServices masterServices){ + this.master = masterServices; + this.rsGroupInfoManager = masterServices.getRSRSGroupInfoManager(); + } + + // for backward compatible + private RSGroupInfo fillTables(RSGroupInfo rsGroupInfo) throws IOException { + return RSGroupUtil.fillTables(rsGroupInfo, master.getTableDescriptors().getAll().values()); + } + + @Override + public void getRSGroupInfo(RpcController controller, GetRSGroupInfoRequest request, + RpcCallback done) { + GetRSGroupInfoResponse.Builder builder = GetRSGroupInfoResponse.newBuilder(); + String groupName = request.getRSGroupName(); + LOG.info( + master.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, group=" + groupName); + try { + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().preGetRSGroupInfo(groupName); + } + RSGroupInfo rsGroupInfo = rsGroupInfoManager.getRSGroup(groupName); + if (rsGroupInfo != null) { + builder.setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(fillTables(rsGroupInfo))); + } + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().postGetRSGroupInfo(groupName); + } + } catch (IOException e) { + 
CoprocessorRpcUtils.setControllerException(controller, e); + } + done.run(builder.build()); + } + + @Override + public void getRSGroupInfoOfTable(RpcController controller, GetRSGroupInfoOfTableRequest request, + RpcCallback done) { + GetRSGroupInfoOfTableResponse.Builder builder = GetRSGroupInfoOfTableResponse.newBuilder(); + TableName tableName = ProtobufUtil.toTableName(request.getTableName()); + LOG.info( + master.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, table=" + tableName); + try { + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().preGetRSGroupInfoOfTable(tableName); + } + Optional optGroup = + RSGroupUtil.getRSGroupInfo(master, rsGroupInfoManager, tableName); + if (optGroup.isPresent()) { + builder.setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(fillTables(optGroup.get()))); + } else { + if (master.getTableStateManager().isTablePresent(tableName)) { + RSGroupInfo rsGroupInfo = rsGroupInfoManager.getRSGroup(RSGroupInfo.DEFAULT_GROUP); + builder.setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(fillTables(rsGroupInfo))); + } + } + + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().postGetRSGroupInfoOfTable(tableName); + } + } catch (IOException e) { + CoprocessorRpcUtils.setControllerException(controller, e); + } + done.run(builder.build()); + } + + @Override + public void moveServers(RpcController controller, MoveServersRequest request, + RpcCallback done) { + MoveServersResponse.Builder builder = MoveServersResponse.newBuilder(); + Set
hostPorts = Sets.newHashSet(); + for (HBaseProtos.ServerName el : request.getServersList()) { + hostPorts.add(Address.fromParts(el.getHostName(), el.getPort())); + } + LOG.info(master.getClientIdAuditPrefix() + " move servers " + hostPorts + " to rsgroup " + + request.getTargetGroup()); + try { + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().preMoveServers(hostPorts, request.getTargetGroup()); + } + rsGroupInfoManager.moveServers(hostPorts, request.getTargetGroup()); + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().postMoveServers(hostPorts, request.getTargetGroup()); + } + } catch (IOException e) { + CoprocessorRpcUtils.setControllerException(controller, e); + } + done.run(builder.build()); + } + + private void moveTablesAndWait(Set tables, String targetGroup) throws IOException { + List procIds = new ArrayList(); + for (TableName tableName : tables) { + TableDescriptor oldTd = master.getTableDescriptors().get(tableName); + if (oldTd == null) { + continue; + } + TableDescriptor newTd = + TableDescriptorBuilder.newBuilder(oldTd).setRegionServerGroup(targetGroup).build(); + procIds.add(master.modifyTable(tableName, newTd, HConstants.NO_NONCE, HConstants.NO_NONCE)); + } + for (long procId : procIds) { + Procedure proc = master.getMasterProcedureExecutor().getProcedure(procId); + if (proc == null) { + continue; + } + ProcedureSyncWait.waitForProcedureToCompleteIOE(master.getMasterProcedureExecutor(), proc, + Long.MAX_VALUE); + } + } + + @Override + public void moveTables(RpcController controller, MoveTablesRequest request, + RpcCallback done) { + MoveTablesResponse.Builder builder = MoveTablesResponse.newBuilder(); + Set tables = new HashSet<>(request.getTableNameList().size()); + for (HBaseProtos.TableName tableName : request.getTableNameList()) { + tables.add(ProtobufUtil.toTableName(tableName)); + } + LOG.info(master.getClientIdAuditPrefix() + " move tables " + tables + " to rsgroup " + + 
request.getTargetGroup()); + try { + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().preMoveTables(tables, request.getTargetGroup()); + } + moveTablesAndWait(tables, request.getTargetGroup()); + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().postMoveTables(tables, request.getTargetGroup()); + } + } catch (IOException e) { + CoprocessorRpcUtils.setControllerException(controller, e); + } + done.run(builder.build()); + } + + @Override + public void addRSGroup(RpcController controller, AddRSGroupRequest request, + RpcCallback done) { + AddRSGroupResponse.Builder builder = AddRSGroupResponse.newBuilder(); + LOG.info(master.getClientIdAuditPrefix() + " add rsgroup " + request.getRSGroupName()); + try { + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().preAddRSGroup(request.getRSGroupName()); + } + rsGroupInfoManager.addRSGroup(new RSGroupInfo(request.getRSGroupName())); + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().postAddRSGroup(request.getRSGroupName()); + } + } catch (IOException e) { + CoprocessorRpcUtils.setControllerException(controller, e); + } + done.run(builder.build()); + } + + @Override + public void removeRSGroup(RpcController controller, RemoveRSGroupRequest request, + RpcCallback done) { + RemoveRSGroupResponse.Builder builder = RemoveRSGroupResponse.newBuilder(); + LOG.info(master.getClientIdAuditPrefix() + " remove rsgroup " + request.getRSGroupName()); + try { + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().preRemoveRSGroup(request.getRSGroupName()); + } + rsGroupInfoManager.removeRSGroup(request.getRSGroupName()); + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().postRemoveRSGroup(request.getRSGroupName()); + } + } catch (IOException e) { + CoprocessorRpcUtils.setControllerException(controller, e); + } + 
done.run(builder.build()); + } + + @Override + public void balanceRSGroup(RpcController controller, BalanceRSGroupRequest request, + RpcCallback done) { + BalanceRSGroupResponse.Builder builder = BalanceRSGroupResponse.newBuilder(); + LOG.info( + master.getClientIdAuditPrefix() + " balance rsgroup, group=" + request.getRSGroupName()); + try { + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().preBalanceRSGroup(request.getRSGroupName()); + } + boolean balancerRan = rsGroupInfoManager.balanceRSGroup(request.getRSGroupName()); + builder.setBalanceRan(balancerRan); + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().postBalanceRSGroup(request.getRSGroupName(), balancerRan); + } + } catch (IOException e) { + CoprocessorRpcUtils.setControllerException(controller, e); + builder.setBalanceRan(false); + } + done.run(builder.build()); + } + + @Override + public void listRSGroupInfos(RpcController controller, ListRSGroupInfosRequest request, + RpcCallback done) { + ListRSGroupInfosResponse.Builder builder = ListRSGroupInfosResponse.newBuilder(); + LOG.info(master.getClientIdAuditPrefix() + " list rsgroup"); + try { + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().preListRSGroups(); + } + List rsGroupInfos = rsGroupInfoManager.listRSGroups().stream() + .map(RSGroupInfo::new).collect(Collectors.toList()); + Map name2Info = new HashMap<>(); + for (RSGroupInfo rsGroupInfo : rsGroupInfos) { + name2Info.put(rsGroupInfo.getName(), rsGroupInfo); + } + for (TableDescriptor td : master.getTableDescriptors().getAll().values()) { + String groupName = td.getRegionServerGroup().orElse(RSGroupInfo.DEFAULT_GROUP); + RSGroupInfo rsGroupInfo = name2Info.get(groupName); + if (rsGroupInfo != null) { + rsGroupInfo.addTable(td.getTableName()); + } + } + for (RSGroupInfo rsGroupInfo : rsGroupInfos) { + // TODO: this can be done at once outside this loop, do not need to scan all every time. 
+ builder.addRSGroupInfo(ProtobufUtil.toProtoGroupInfo(rsGroupInfo)); + } + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().postListRSGroups(); + } + } catch (IOException e) { + CoprocessorRpcUtils.setControllerException(controller, e); + } + done.run(builder.build()); + } + + @Override + public void getRSGroupInfoOfServer(RpcController controller, + GetRSGroupInfoOfServerRequest request, RpcCallback done) { + GetRSGroupInfoOfServerResponse.Builder builder = GetRSGroupInfoOfServerResponse.newBuilder(); + Address hp = + Address.fromParts(request.getServer().getHostName(), request.getServer().getPort()); + LOG.info(master.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, server=" + hp); + try { + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().preGetRSGroupInfoOfServer(hp); + } + RSGroupInfo info = rsGroupInfoManager.getRSGroupOfServer(hp); + if (info != null) { + builder.setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(fillTables(info))); + } + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().postGetRSGroupInfoOfServer(hp); + } + } catch (IOException e) { + CoprocessorRpcUtils.setControllerException(controller, e); + } + done.run(builder.build()); + } + + @Override + public void moveServersAndTables(RpcController controller, MoveServersAndTablesRequest request, + RpcCallback done) { + MoveServersAndTablesResponse.Builder builder = MoveServersAndTablesResponse.newBuilder(); + Set
hostPorts = Sets.newHashSet(); + for (HBaseProtos.ServerName el : request.getServersList()) { + hostPorts.add(Address.fromParts(el.getHostName(), el.getPort())); + } + Set tables = new HashSet<>(request.getTableNameList().size()); + for (HBaseProtos.TableName tableName : request.getTableNameList()) { + tables.add(ProtobufUtil.toTableName(tableName)); + } + LOG.info(master.getClientIdAuditPrefix() + " move servers " + hostPorts + " and tables " + + tables + " to rsgroup" + request.getTargetGroup()); + try { + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().preMoveServersAndTables(hostPorts, tables, + request.getTargetGroup()); + } + rsGroupInfoManager.moveServers(hostPorts, request.getTargetGroup()); + moveTablesAndWait(tables, request.getTargetGroup()); + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().postMoveServersAndTables(hostPorts, tables, + request.getTargetGroup()); + } + } catch (IOException e) { + CoprocessorRpcUtils.setControllerException(controller, e); + } + done.run(builder.build()); + } + + @Override + public void removeServers(RpcController controller, RemoveServersRequest request, + RpcCallback done) { + RemoveServersResponse.Builder builder = RemoveServersResponse.newBuilder(); + Set
servers = Sets.newHashSet(); + for (HBaseProtos.ServerName el : request.getServersList()) { + servers.add(Address.fromParts(el.getHostName(), el.getPort())); + } + LOG.info( + master.getClientIdAuditPrefix() + " remove decommissioned servers from rsgroup: " + servers); + try { + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().preRemoveServers(servers); + } + rsGroupInfoManager.removeServers(servers); + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().postRemoveServers(servers); + } + } catch (IOException e) { + CoprocessorRpcUtils.setControllerException(controller, e); + } + done.run(builder.build()); + } + + @Override + public void setRSGroupForTables(RpcController controller, SetRSGroupForTablesRequest request, + RpcCallback done) { + SetRSGroupForTablesResponse.Builder builder = SetRSGroupForTablesResponse.newBuilder(); + Set tables = new HashSet<>(request.getTableNameList().size()); + for (HBaseProtos.TableName tableName : request.getTableNameList()) { + tables.add(ProtobufUtil.toTableName(tableName)); + } + LOG.info(master.getClientIdAuditPrefix() + " set tables " + tables + " to rsgroup " + + request.getTargetGroup()); + try { + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().preSetRSGroupForTables(tables, request.getTargetGroup()); + } + moveTablesAndWait(tables, request.getTargetGroup()); + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().postSetRSGroupForTables(tables, request.getTargetGroup()); + } + } catch (IOException e) { + CoprocessorRpcUtils.setControllerException(controller, e); + } + done.run(builder.build()); + } +} \ No newline at end of file diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java similarity index 82% rename from 
hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java rename to hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java index 9709fb550d9b..cb514c139ea6 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rsgroup; import java.io.IOException; @@ -28,7 +27,6 @@ import java.util.Map; import java.util.Set; import java.util.TreeMap; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.HBaseIOException; @@ -56,18 +54,17 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Maps; /** - * GroupBasedLoadBalancer, used when Region Server Grouping is configured (HBase-6721) - * It does region balance based on a table's group membership. - * - * Most assignment methods contain two exclusive code paths: Online - when the group - * table is online and Offline - when it is unavailable. - * - * During Offline, assignments are assigned based on cached information in zookeeper. - * If unavailable (ie bootstrap) then regions are assigned randomly. - * - * Once the GROUP table has been assigned, the balancer switches to Online and will then - * start providing appropriate assignments for user tables. - * + * GroupBasedLoadBalancer, used when Region Server Grouping is configured (HBase-6721) It does + * region balance based on a table's group membership. + *

+ * Most assignment methods contain two exclusive code paths: Online - when the group table is online + * and Offline - when it is unavailable. + *

+ * During Offline, assignments are assigned based on cached information in zookeeper. If unavailable + * (ie bootstrap) then regions are assigned randomly. + *

+ * Once the GROUP table has been assigned, the balancer switches to Online and will then start + * providing appropriate assignments for user tables. */ @InterfaceAudience.Private public class RSGroupBasedLoadBalancer implements RSGroupableBalancer { @@ -113,16 +110,16 @@ public void setMasterServices(MasterServices masterServices) { @Override public List balanceCluster(TableName tableName, Map> - clusterState) throws HBaseIOException { + clusterState) throws IOException { return balanceCluster(clusterState); } @Override public List balanceCluster(Map> clusterState) - throws HBaseIOException { + throws IOException { if (!isOnline()) { - throw new ConstraintException(RSGroupInfoManager.RSGROUP_TABLE_NAME + - " is not online, unable to perform balance"); + throw new ConstraintException( + RSGroupInfoManager.class.getSimpleName() + " is not online, unable to perform balance"); } // Calculate correct assignments and a list of RegionPlan for mis-placed regions @@ -171,7 +168,7 @@ public List balanceCluster(Map> cluster @Override public Map> roundRobinAssignment( - List regions, List servers) throws HBaseIOException { + List regions, List servers) throws IOException { Map> assignments = Maps.newHashMap(); ListMultimap regionMap = ArrayListMultimap.create(); ListMultimap serverMap = ArrayListMultimap.create(); @@ -203,13 +200,12 @@ public Map> retainAssignment( Map> assignments = new TreeMap<>(); ListMultimap groupToRegion = ArrayListMultimap.create(); Set misplacedRegions = getMisplacedRegions(regions); + RSGroupInfo defaultInfo = rsGroupInfoManager.getRSGroup(RSGroupInfo.DEFAULT_GROUP); for (RegionInfo region : regions.keySet()) { if (!misplacedRegions.contains(region)) { - String groupName = rsGroupInfoManager.getRSGroupOfTable(region.getTable()); - if (groupName == null) { - LOG.debug("Group not found for table " + region.getTable() + ", using default"); - groupName = RSGroupInfo.DEFAULT_GROUP; - } + String groupName = + RSGroupUtil.getRSGroupInfo(masterServices, 
rsGroupInfoManager, region.getTable()) + .orElse(defaultInfo).getName(); groupToRegion.put(groupName, region); } } @@ -237,15 +233,11 @@ public Map> retainAssignment( } for (RegionInfo region : misplacedRegions) { - String groupName = rsGroupInfoManager.getRSGroupOfTable(region.getTable()); - if (groupName == null) { - LOG.debug("Group not found for table " + region.getTable() + ", using default"); - groupName = RSGroupInfo.DEFAULT_GROUP; - } - RSGroupInfo info = rsGroupInfoManager.getRSGroup(groupName); + RSGroupInfo info = + RSGroupUtil.getRSGroupInfo(masterServices, rsGroupInfoManager, region.getTable()) + .orElse(defaultInfo); List candidateList = filterOfflineServers(info, servers); - ServerName server = this.internalBalancer.randomAssignment(region, - candidateList); + ServerName server = this.internalBalancer.randomAssignment(region, candidateList); if (server != null) { assignments.computeIfAbsent(server, s -> new ArrayList<>()).add(region); } else { @@ -261,7 +253,7 @@ public Map> retainAssignment( @Override public ServerName randomAssignment(RegionInfo region, - List servers) throws HBaseIOException { + List servers) throws IOException { ListMultimap regionMap = LinkedListMultimap.create(); ListMultimap serverMap = LinkedListMultimap.create(); generateGroupMaps(Lists.newArrayList(region), servers, regionMap, serverMap); @@ -269,18 +261,15 @@ public ServerName randomAssignment(RegionInfo region, return this.internalBalancer.randomAssignment(region, filteredServers); } - private void generateGroupMaps( - List regions, - List servers, - ListMultimap regionMap, - ListMultimap serverMap) throws HBaseIOException { + private void generateGroupMaps(List regions, List servers, + ListMultimap regionMap, ListMultimap serverMap) + throws HBaseIOException { try { + RSGroupInfo defaultInfo = rsGroupInfoManager.getRSGroup(RSGroupInfo.DEFAULT_GROUP); for (RegionInfo region : regions) { - String groupName = rsGroupInfoManager.getRSGroupOfTable(region.getTable()); - if 
(groupName == null) { - LOG.debug("Group not found for table " + region.getTable() + ", using default"); - groupName = RSGroupInfo.DEFAULT_GROUP; - } + String groupName = + RSGroupUtil.getRSGroupInfo(masterServices, rsGroupInfoManager, region.getTable()) + .orElse(defaultInfo).getName(); regionMap.put(groupName, region); } for (String groupKey : regionMap.keySet()) { @@ -332,32 +321,26 @@ private List filterServers(Set

servers, } @VisibleForTesting - public Set getMisplacedRegions( - Map regions) throws IOException { + public Set getMisplacedRegions(Map regions) + throws IOException { Set misplacedRegions = new HashSet<>(); - for(Map.Entry region : regions.entrySet()) { + RSGroupInfo defaultGroupInfo = rsGroupInfoManager.getRSGroup(RSGroupInfo.DEFAULT_GROUP); + for (Map.Entry region : regions.entrySet()) { RegionInfo regionInfo = region.getKey(); ServerName assignedServer = region.getValue(); - String groupName = rsGroupInfoManager.getRSGroupOfTable(regionInfo.getTable()); - if (groupName == null) { - LOG.debug("Group not found for table " + regionInfo.getTable() + ", using default"); - groupName = RSGroupInfo.DEFAULT_GROUP; - } - RSGroupInfo info = rsGroupInfoManager.getRSGroup(groupName); if (assignedServer == null) { LOG.debug("There is no assigned server for {}", region); continue; } - RSGroupInfo otherInfo = rsGroupInfoManager.getRSGroupOfServer(assignedServer.getAddress()); - if (info == null && otherInfo == null) { - LOG.warn("Couldn't obtain rs group information for {} on {}", region, assignedServer); - continue; - } - if ((info == null || !info.containsServer(assignedServer.getAddress()))) { - LOG.debug("Found misplaced region: " + regionInfo.getRegionNameAsString() + - " on server: " + assignedServer + - " found in group: " + otherInfo + - " outside of group: " + (info == null ? "UNKNOWN" : info.getName())); + RSGroupInfo info = + RSGroupUtil.getRSGroupInfo(masterServices, rsGroupInfoManager, regionInfo.getTable()) + .orElse(defaultGroupInfo); + if (!info.containsServer(assignedServer.getAddress())) { + RSGroupInfo otherInfo = rsGroupInfoManager.getRSGroupOfServer(assignedServer.getAddress()); + LOG.debug( + "Found misplaced region: {} on server: {} found in group: {} outside of group: {}", + regionInfo.getRegionNameAsString(), assignedServer, + otherInfo != null ? 
otherInfo.getName() : "UNKNOWN", info.getName()); misplacedRegions.add(regionInfo); } } @@ -365,11 +348,11 @@ public Set getMisplacedRegions( } private Pair>, List> correctAssignments( - Map> existingAssignments) throws HBaseIOException{ + Map> existingAssignments) throws IOException { // To return Map> correctAssignments = new TreeMap<>(); List regionPlansForMisplacedRegions = new ArrayList<>(); - + RSGroupInfo defaultInfo = rsGroupInfoManager.getRSGroup(RSGroupInfo.DEFAULT_GROUP); for (Map.Entry> assignments : existingAssignments.entrySet()){ ServerName currentHostServer = assignments.getKey(); correctAssignments.put(currentHostServer, new LinkedList<>()); @@ -377,15 +360,11 @@ private Pair>, List> correctAssignm for (RegionInfo region : regions) { RSGroupInfo targetRSGInfo = null; try { - String groupName = rsGroupInfoManager.getRSGroupOfTable(region.getTable()); - if (groupName == null) { - LOG.debug("Group not found for table " + region.getTable() + ", using default"); - groupName = RSGroupInfo.DEFAULT_GROUP; - } - targetRSGInfo = rsGroupInfoManager.getRSGroup(groupName); + targetRSGInfo = + RSGroupUtil.getRSGroupInfo(masterServices, rsGroupInfoManager, region.getTable()) + .orElse(defaultInfo); } catch (IOException exp) { - LOG.debug("RSGroup information null for region of table " + region.getTable(), - exp); + LOG.debug("RSGroup information null for region of table " + region.getTable(), exp); } if (targetRSGInfo == null || !targetRSGInfo.containsServer(currentHostServer.getAddress())) { // region is mis-placed @@ -402,7 +381,7 @@ private Pair>, List> correctAssignm } @Override - public void initialize() throws HBaseIOException { + public void initialize() throws IOException { try { if (rsGroupInfoManager == null) { List cps = diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java similarity index 58% rename from 
hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java rename to hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java index 398e8a4008c1..de4085b04f76 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java @@ -15,38 +15,23 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rsgroup; import java.io.IOException; import java.util.List; import java.util.Set; - -import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.net.Address; -import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; /** - * Interface used to manage RSGroupInfo storage. An implementation - * has the option to support offline mode. - * See {@link RSGroupBasedLoadBalancer} + * Interface used to manage RSGroupInfo storage. An implementation has the option to support offline + * mode. See {@code RSGroupBasedLoadBalancer}. */ @InterfaceAudience.Private public interface RSGroupInfoManager { - String REASSIGN_WAIT_INTERVAL_KEY = "hbase.rsgroup.reassign.wait"; - long DEFAULT_REASSIGN_WAIT_INTERVAL = 30 * 1000L; - - //Assigned before user tables - TableName RSGROUP_TABLE_NAME = - TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "rsgroup"); - String rsGroupZNode = "rsgroup"; - byte[] META_FAMILY_BYTES = Bytes.toBytes("m"); - byte[] META_QUALIFIER_BYTES = Bytes.toBytes("i"); - byte[] ROW_KEY = {0}; - void start(); /** @@ -62,12 +47,10 @@ public interface RSGroupInfoManager { /** * Move servers to a new group. 
* @param servers list of servers, must be part of the same group - * @param srcGroup groupName being moved from - * @param dstGroup groupName being moved to + * @param targetGroupName groupName being moved to * @return Set of servers moved (May be a subset of {@code servers}). */ - Set
moveServers(Set
servers, String srcGroup, String dstGroup) - throws IOException; + void moveServers(Set
servers, String targetGroupName) throws IOException; /** * Gets the group info of server. @@ -80,48 +63,44 @@ Set
moveServers(Set
servers, String srcGroup, String dstGroup) RSGroupInfo getRSGroup(String groupName) throws IOException; /** - * Get the group membership of a table + * List the existing {@code RSGroupInfo}s. */ - String getRSGroupOfTable(TableName tableName) throws IOException; + List listRSGroups() throws IOException; /** - * Set the group membership of a set of tables - * - * @param tableNames set of tables to move - * @param groupName name of group of tables to move to + * Whether the manager is able to fully return group metadata + * @return whether the manager is in online mode */ - void moveTables(Set tableNames, String groupName) throws IOException; + boolean isOnline(); /** - * List the existing {@code RSGroupInfo}s. + * Remove decommissioned servers from rsgroup + * @param servers set of servers to remove */ - List listRSGroups() throws IOException; + void removeServers(Set
servers) throws IOException; /** - * Refresh/reload the group information from the persistent store + * Get {@code RSGroupInfo} for the given table. */ - void refresh() throws IOException; + RSGroupInfo getRSGroupForTable(TableName tableName) throws IOException; - /** - * Whether the manager is able to fully return group metadata - * - * @return whether the manager is in online mode - */ - boolean isOnline(); + static RSGroupInfoManager create(MasterServices master) throws IOException { + return RSGroupInfoManagerImpl.getInstance(master); + } /** - * Move servers and tables to a new group. - * @param servers list of servers, must be part of the same group - * @param tables set of tables to move - * @param srcGroup groupName being moved from - * @param dstGroup groupName being moved to + * Balance a rs group + * @param groupName name of the group + * @return true if balancer run + * @throws IOException */ - void moveServersAndTables(Set
servers, Set tables, - String srcGroup, String dstGroup) throws IOException; + boolean balanceRSGroup(String groupName) throws IOException; /** - * Remove decommissioned servers from rsgroup - * @param servers set of servers to remove + * Set group for tables + * @param tables + * @param groupName + * @throws IOException */ - void removeServers(Set
servers) throws IOException; + void setRSGroup(Set tables, String groupName) throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java new file mode 100644 index 000000000000..a845b3db1382 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java @@ -0,0 +1,1254 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rsgroup; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.function.Function; +import org.apache.commons.lang3.StringUtils; +import java.util.OptionalLong; +import java.util.SortedSet; +import java.util.TreeSet; +import org.apache.hadoop.hbase.Coprocessor; +import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableDescriptors; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.AsyncClusterConnection; +import org.apache.hadoop.hbase.client.AsyncTable; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.CoprocessorDescriptorBuilder; +import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Mutation; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.constraint.ConstraintException; +import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint; +import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.master.assignment.RegionStateNode; +import org.apache.hadoop.hbase.master.LoadBalancer; +import 
org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.RegionPlan; +import org.apache.hadoop.hbase.master.RegionState; +import org.apache.hadoop.hbase.master.ServerListener; +import org.apache.hadoop.hbase.master.ServerManager; +import org.apache.hadoop.hbase.master.TableStateManager; +import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure; +import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil; +import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait; +import org.apache.hadoop.hbase.net.Address; +import org.apache.hadoop.hbase.procedure2.Procedure; +import org.apache.hadoop.hbase.protobuf.ProtobufMagic; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto; +import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MultiRowMutationService; +import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest; +import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsResponse; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos; +import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FutureUtils; +import org.apache.hadoop.hbase.util.Threads; +import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZNodePaths; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.zookeeper.KeeperException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hbase.thirdparty.com.google.common.collect.Lists; +import org.apache.hbase.thirdparty.com.google.common.collect.Maps; 
+import org.apache.hbase.thirdparty.com.google.common.collect.Sets; + +/** + * This is an implementation of {@link RSGroupInfoManager} which makes use of an HBase table as the + * persistence store for the group information. It also makes use of zookeeper to store group + * information needed for bootstrapping during offline mode. + *

Concurrency

RSGroup state is kept locally in Maps. There is a rsgroup name to cached + * RSGroupInfo Map at {@link #rsGroupMap}. These Maps are persisted to the hbase:rsgroup table (and cached in + * zk) on each modification. + *

+ * Mutations on state are synchronized but reads can continue without having to wait on an instance + * monitor, mutations do wholesale replace of the Maps on update -- Copy-On-Write; the local Maps of + * state are read-only, just-in-case (see flushConfig). + *

+ * Reads must not block else there is a danger we'll deadlock. + *

+ * Clients of this class, the {@link RSGroupAdminEndpoint} for example, want to query and then act + * on the results of the query modifying cache in zookeeper without another thread making + * intermediate modifications. These clients synchronize on the 'this' instance so no other has + * access concurrently. Reads must be able to continue concurrently. + */ +@InterfaceAudience.Private +final class RSGroupInfoManagerImpl implements RSGroupInfoManager { + private static final Logger LOG = LoggerFactory.getLogger(RSGroupInfoManagerImpl.class); + + // Assigned before user tables + @VisibleForTesting + static final TableName RSGROUP_TABLE_NAME = + TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "rsgroup"); + + @VisibleForTesting + static final String KEEP_ONE_SERVER_IN_DEFAULT_ERROR_MESSAGE = "should keep at least " + + "one server in 'default' RSGroup."; + + /** Define the config key of retries threshold when movements failed */ + @VisibleForTesting + static final String FAILED_MOVE_MAX_RETRY = "hbase.rsgroup.move.max.retry"; + + /** Define the default number of retries */ + @VisibleForTesting + static final int DEFAULT_MAX_RETRY_VALUE = 50; + + private static final String RS_GROUP_ZNODE = "rsgroup"; + + @VisibleForTesting + static final byte[] META_FAMILY_BYTES = Bytes.toBytes("m"); + + @VisibleForTesting + static final byte[] META_QUALIFIER_BYTES = Bytes.toBytes("i"); + + @VisibleForTesting + static final String MIGRATE_THREAD_NAME = "Migrate-RSGroup-Tables"; + + private static final byte[] ROW_KEY = { 0 }; + + /** Table descriptor for hbase:rsgroup catalog table */ + private static final TableDescriptor RSGROUP_TABLE_DESC; + static { + TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(RSGROUP_TABLE_NAME) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(META_FAMILY_BYTES)) + .setRegionSplitPolicyClassName(DisabledRegionSplitPolicy.class.getName()); + try { + builder.setCoprocessor( + 
CoprocessorDescriptorBuilder.newBuilder(MultiRowMutationEndpoint.class.getName()) + .setPriority(Coprocessor.PRIORITY_SYSTEM).build()); + } catch (IOException ex) { + throw new Error(ex); + } + RSGROUP_TABLE_DESC = builder.build(); + } + + // There two Maps are immutable and wholesale replaced on each modification + // so are safe to access concurrently. See class comment. + private static final class RSGroupInfoHolder { + final ImmutableMap groupName2Group; + final ImmutableMap tableName2Group; + + RSGroupInfoHolder() { + this(Collections.emptyMap()); + } + + RSGroupInfoHolder(Map rsGroupMap) { + ImmutableMap.Builder group2Name2GroupBuilder = ImmutableMap.builder(); + ImmutableMap.Builder tableName2GroupBuilder = ImmutableMap.builder(); + rsGroupMap.forEach((groupName, rsGroupInfo) -> { + group2Name2GroupBuilder.put(groupName, rsGroupInfo); + if (!groupName.equals(RSGroupInfo.DEFAULT_GROUP)) { + rsGroupInfo.getTables() + .forEach(tableName -> tableName2GroupBuilder.put(tableName, rsGroupInfo)); + } + }); + this.groupName2Group = group2Name2GroupBuilder.build(); + this.tableName2Group = tableName2GroupBuilder.build(); + } + } + + private volatile RSGroupInfoHolder holder = new RSGroupInfoHolder(); + + private final MasterServices masterServices; + private final AsyncClusterConnection conn; + private final ZKWatcher watcher; + private final RSGroupStartupWorker rsGroupStartupWorker; + // contains list of groups that were last flushed to persistent store + private Set prevRSGroups = new HashSet<>(); + private final ServerEventsListenerThread serverEventsListenerThread = + new ServerEventsListenerThread(); + + private RSGroupInfoManagerImpl(MasterServices masterServices) { + this.masterServices = masterServices; + this.watcher = masterServices.getZooKeeper(); + this.conn = masterServices.getAsyncClusterConnection(); + this.rsGroupStartupWorker = new RSGroupStartupWorker(); + } + + private synchronized void init() throws IOException { + refresh(false); + 
serverEventsListenerThread.start(); + masterServices.getServerManager().registerListener(serverEventsListenerThread); + migrate(); + } + + static RSGroupInfoManager getInstance(MasterServices masterServices) throws IOException { + RSGroupInfoManagerImpl instance = new RSGroupInfoManagerImpl(masterServices); + instance.init(); + return instance; + } + + public void start() { + // create system table of rsgroup + rsGroupStartupWorker.start(); + } + + @Override + public synchronized void addRSGroup(RSGroupInfo rsGroupInfo) throws IOException { + checkGroupName(rsGroupInfo.getName()); + Map rsGroupMap = holder.groupName2Group; + if (rsGroupMap.get(rsGroupInfo.getName()) != null || + rsGroupInfo.getName().equals(RSGroupInfo.DEFAULT_GROUP)) { + throw new ConstraintException("Group already exists: " + rsGroupInfo.getName()); + } + Map newGroupMap = Maps.newHashMap(rsGroupMap); + newGroupMap.put(rsGroupInfo.getName(), rsGroupInfo); + flushConfig(newGroupMap); + } + + private RSGroupInfo getRSGroupInfo(final String groupName) throws ConstraintException { + RSGroupInfo rsGroupInfo = holder.groupName2Group.get(groupName); + if (rsGroupInfo == null) { + throw new ConstraintException("RSGroup " + groupName + " does not exist"); + } + return rsGroupInfo; + } + + /** + * @param masterServices the masterServices to get online servers for + * @return Set of online Servers named for their hostname and port (not ServerName). + */ + private static Set

getOnlineServers(final MasterServices masterServices) { + Set
onlineServers = new HashSet
(); + if (masterServices == null) { + return onlineServers; + } + + for (ServerName server : masterServices.getServerManager().getOnlineServers().keySet()) { + onlineServers.add(server.getAddress()); + } + return onlineServers; + } + + public synchronized Set
moveServers(Set
servers, String srcGroup, + String dstGroup) throws IOException { + RSGroupInfo src = getRSGroupInfo(srcGroup); + RSGroupInfo dst = getRSGroupInfo(dstGroup); + // If destination is 'default' rsgroup, only add servers that are online. If not online, drop + // it. If not 'default' group, add server to 'dst' rsgroup EVEN IF IT IS NOT online (could be a + // rsgroup of dead servers that are to come back later). + Set
onlineServers = + dst.getName().equals(RSGroupInfo.DEFAULT_GROUP) ? getOnlineServers(this.masterServices) + : null; + for (Address el : servers) { + src.removeServer(el); + if (onlineServers != null) { + if (!onlineServers.contains(el)) { + if (LOG.isDebugEnabled()) { + LOG.debug("Dropping " + el + " during move-to-default rsgroup because not online"); + } + continue; + } + } + dst.addServer(el); + } + Map newGroupMap = Maps.newHashMap(holder.groupName2Group); + newGroupMap.put(src.getName(), src); + newGroupMap.put(dst.getName(), dst); + flushConfig(newGroupMap); + return dst.getServers(); + } + + @Override + public RSGroupInfo getRSGroupOfServer(Address serverHostPort) throws IOException { + for (RSGroupInfo info : holder.groupName2Group.values()) { + if (info.containsServer(serverHostPort)) { + return info; + } + } + return null; + } + + @Override + public RSGroupInfo getRSGroup(String groupName) throws IOException { + return holder.groupName2Group.get(groupName); + } + + @Override + public synchronized void removeRSGroup(String groupName) throws IOException { + RSGroupInfo rsGroupInfo = getRSGroupInfo(groupName); + int serverCount = rsGroupInfo.getServers().size(); + if (serverCount > 0) { + throw new ConstraintException("RSGroup " + groupName + " has " + serverCount + + " servers; you must remove these servers from the RSGroup before" + + " the RSGroup can be removed."); + } + for (TableDescriptor td : masterServices.getTableDescriptors().getAll().values()) { + if (td.getRegionServerGroup().map(groupName::equals).orElse(false)) { + throw new ConstraintException("RSGroup " + groupName + " is already referenced by " + + td.getTableName() + "; you must remove all the tables from the rsgroup before " + + "the rsgroup can be removed."); + } + } + for (NamespaceDescriptor ns : masterServices.getClusterSchema().getNamespaces()) { + String nsGroup = ns.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP); + if (nsGroup != null && nsGroup.equals(groupName)) { + 
throw new ConstraintException( + "RSGroup " + groupName + " is referenced by namespace: " + ns.getName()); + } + } + Map rsGroupMap = holder.groupName2Group; + if (!rsGroupMap.containsKey(groupName) || groupName.equals(RSGroupInfo.DEFAULT_GROUP)) { + throw new ConstraintException( + "Group " + groupName + " does not exist or is a reserved " + "group"); + } + Map newGroupMap = Maps.newHashMap(rsGroupMap); + newGroupMap.remove(groupName); + flushConfig(newGroupMap); + } + + @Override + public List listRSGroups() { + return Lists.newArrayList(holder.groupName2Group.values()); + } + + @Override + public boolean isOnline() { + return rsGroupStartupWorker.isOnline(); + } + + @Override + public synchronized void removeServers(Set
servers) throws IOException { + if (servers == null || servers.isEmpty()) { + throw new ConstraintException("The set of servers to remove cannot be null or empty."); + } + + // check the set of servers + checkForDeadOrOnlineServers(servers); + + Map rsGroupInfos = new HashMap(); + for (Address el : servers) { + RSGroupInfo rsGroupInfo = getRSGroupOfServer(el); + if (rsGroupInfo != null) { + RSGroupInfo newRsGroupInfo = rsGroupInfos.get(rsGroupInfo.getName()); + if (newRsGroupInfo == null) { + rsGroupInfo.removeServer(el); + rsGroupInfos.put(rsGroupInfo.getName(), rsGroupInfo); + } else { + newRsGroupInfo.removeServer(el); + rsGroupInfos.put(newRsGroupInfo.getName(), newRsGroupInfo); + } + } else { + LOG.warn("Server " + el + " does not belong to any rsgroup."); + } + } + + if (rsGroupInfos.size() > 0) { + Map newGroupMap = Maps.newHashMap(holder.groupName2Group); + newGroupMap.putAll(rsGroupInfos); + flushConfig(newGroupMap); + } + LOG.info("Remove decommissioned servers {} from RSGroup done", servers); + } + + private List retrieveGroupListFromGroupTable() throws IOException { + List rsGroupInfoList = Lists.newArrayList(); + AsyncTable table = conn.getTable(RSGROUP_TABLE_NAME); + try (ResultScanner scanner = table.getScanner(META_FAMILY_BYTES, META_QUALIFIER_BYTES)) { + for (Result result;;) { + result = scanner.next(); + if (result == null) { + break; + } + RSGroupProtos.RSGroupInfo proto = RSGroupProtos.RSGroupInfo + .parseFrom(result.getValue(META_FAMILY_BYTES, META_QUALIFIER_BYTES)); + rsGroupInfoList.add(ProtobufUtil.toGroupInfo(proto)); + } + } + return rsGroupInfoList; + } + + private List retrieveGroupListFromZookeeper() throws IOException { + String groupBasePath = ZNodePaths.joinZNode(watcher.getZNodePaths().baseZNode, RS_GROUP_ZNODE); + List RSGroupInfoList = Lists.newArrayList(); + // Overwrite any info stored by table, this takes precedence + try { + if (ZKUtil.checkExists(watcher, groupBasePath) != -1) { + List children = 
ZKUtil.listChildrenAndWatchForNewChildren(watcher, groupBasePath); + if (children == null) { + return RSGroupInfoList; + } + for (String znode : children) { + byte[] data = ZKUtil.getData(watcher, ZNodePaths.joinZNode(groupBasePath, znode)); + if (data.length > 0) { + ProtobufUtil.expectPBMagicPrefix(data); + ByteArrayInputStream bis = + new ByteArrayInputStream(data, ProtobufUtil.lengthOfPBMagic(), data.length); + RSGroupInfoList + .add(ProtobufUtil.toGroupInfo(RSGroupProtos.RSGroupInfo.parseFrom(bis))); + } + } + LOG.debug("Read ZK GroupInfo count:" + RSGroupInfoList.size()); + } + } catch (KeeperException | DeserializationException | InterruptedException e) { + throw new IOException("Failed to read rsGroupZNode", e); + } + return RSGroupInfoList; + } + + private void migrate(Collection groupList) { + TableDescriptors tds = masterServices.getTableDescriptors(); + for (RSGroupInfo groupInfo : groupList) { + if (groupInfo.getName().equals(RSGroupInfo.DEFAULT_GROUP)) { + continue; + } + SortedSet failedTables = new TreeSet<>(); + for (TableName tableName : groupInfo.getTables()) { + LOG.debug("Migrating {} in group {}", tableName, groupInfo.getName()); + TableDescriptor oldTd; + try { + oldTd = tds.get(tableName); + } catch (IOException e) { + LOG.warn("Failed to migrate {} in group {}", tableName, groupInfo.getName(), e); + failedTables.add(tableName); + continue; + } + if (oldTd == null) { + continue; + } + if (oldTd.getRegionServerGroup().isPresent()) { + // either we have already migrated it or that user has set the rs group using the new + // code which will set the group directly on table descriptor, skip. + LOG.debug("Skip migrating {} since it is already in group {}", tableName, + oldTd.getRegionServerGroup().get()); + continue; + } + TableDescriptor newTd = TableDescriptorBuilder.newBuilder(oldTd) + .setRegionServerGroup(groupInfo.getName()).build(); + // This is a bit tricky. 
Since we know that the region server group config in + // TableDescriptor will only be used at master side, it is fine to just update the table + // descriptor on file system and also the cache, without reopening all the regions. This + // will be much faster than the normal modifyTable. And when upgrading, we will update + // master first and then region server, so after all the region servers has been reopened, + // the new TableDescriptor will be loaded. + try { + tds.add(newTd); + } catch (IOException e) { + LOG.warn("Failed to migrate {} in group {}", tableName, groupInfo.getName(), e); + failedTables.add(tableName); + continue; + } + } + LOG.debug("Done migrating {}, failed tables {}", groupInfo.getName(), failedTables); + synchronized (RSGroupInfoManagerImpl.this) { + Map rsGroupMap = holder.groupName2Group; + RSGroupInfo currentInfo = rsGroupMap.get(groupInfo.getName()); + if (currentInfo != null) { + RSGroupInfo newInfo = + new RSGroupInfo(currentInfo.getName(), currentInfo.getServers(), failedTables); + Map newGroupMap = new HashMap<>(rsGroupMap); + newGroupMap.put(groupInfo.getName(), newInfo); + try { + flushConfig(newGroupMap); + } catch (IOException e) { + LOG.warn("Failed to persist rs group {}", newInfo.getName(), e); + } + } + } + } + } + + // Migrate the table rs group info from RSGroupInfo into the table descriptor + // Notice that we do not want to block the initialize so this will be done in background, and + // during the migrating, the rs group info maybe incomplete and cause region to be misplaced. 
+ private void migrate() { + Thread migrateThread = new Thread(MIGRATE_THREAD_NAME) { + + @Override + public void run() { + LOG.info("Start migrating table rs group config"); + while (!masterServices.isStopped()) { + Collection groups = holder.groupName2Group.values(); + boolean hasTables = groups.stream().anyMatch(r -> !r.getTables().isEmpty()); + if (!hasTables) { + break; + } + migrate(groups); + } + LOG.info("Done migrating table rs group info"); + } + }; + migrateThread.setDaemon(true); + migrateThread.start(); + } + + /** + * Read rsgroup info from the source of truth, the hbase:rsgroup table. Update zk cache. Called on + * startup of the manager. + */ + private synchronized void refresh(boolean forceOnline) throws IOException { + List groupList = new ArrayList<>(); + + // Overwrite anything read from zk, group table is source of truth + // if online read from GROUP table + if (forceOnline || isOnline()) { + LOG.debug("Refreshing in Online mode."); + groupList.addAll(retrieveGroupListFromGroupTable()); + } else { + LOG.debug("Refreshing in Offline mode."); + groupList.addAll(retrieveGroupListFromZookeeper()); + } + + // This is added to the last of the list so it overwrites the 'default' rsgroup loaded + // from region group table or zk + groupList.add(new RSGroupInfo(RSGroupInfo.DEFAULT_GROUP, getDefaultServers())); + + // populate the data + HashMap newGroupMap = Maps.newHashMap(); + for (RSGroupInfo group : groupList) { + newGroupMap.put(group.getName(), group); + } + resetRSGroupMap(newGroupMap); + updateCacheOfRSGroups(newGroupMap.keySet()); + } + + private void flushConfigTable(Map groupMap) throws IOException { + List mutations = Lists.newArrayList(); + + // populate deletes + for (String groupName : prevRSGroups) { + if (!groupMap.containsKey(groupName)) { + Delete d = new Delete(Bytes.toBytes(groupName)); + mutations.add(d); + } + } + + // populate puts + for (RSGroupInfo gi : groupMap.values()) { + if 
(!gi.getName().equals(RSGroupInfo.DEFAULT_GROUP)) { + RSGroupProtos.RSGroupInfo proto = ProtobufUtil.toProtoGroupInfo(gi); + Put p = new Put(Bytes.toBytes(gi.getName())); + p.addColumn(META_FAMILY_BYTES, META_QUALIFIER_BYTES, proto.toByteArray()); + mutations.add(p); + } + } + + if (mutations.size() > 0) { + multiMutate(mutations); + } + } + + private synchronized void flushConfig() throws IOException { + flushConfig(holder.groupName2Group); + } + + private synchronized void flushConfig(Map newGroupMap) throws IOException { + // For offline mode persistence is still unavailable + // We're refreshing in-memory state but only for servers in default group + if (!isOnline()) { + if (newGroupMap == holder.groupName2Group) { + // When newGroupMap is this.rsGroupMap itself, + // do not need to check default group and other groups as followed + return; + } + + Map oldGroupMap = Maps.newHashMap(holder.groupName2Group); + RSGroupInfo oldDefaultGroup = oldGroupMap.remove(RSGroupInfo.DEFAULT_GROUP); + RSGroupInfo newDefaultGroup = newGroupMap.remove(RSGroupInfo.DEFAULT_GROUP); + if (!oldGroupMap.equals(newGroupMap) /* compare both tables and servers in other groups */ || + !oldDefaultGroup.getTables().equals(newDefaultGroup.getTables()) + /* compare tables in default group */) { + throw new IOException("Only servers in default group can be updated during offline mode"); + } + + // Restore newGroupMap by putting its default group back + newGroupMap.put(RSGroupInfo.DEFAULT_GROUP, newDefaultGroup); + + // Refresh rsGroupMap + // according to the inputted newGroupMap (an updated copy of rsGroupMap) + this.holder = new RSGroupInfoHolder(newGroupMap); + + // Do not need to update tableMap + // because only the update on servers in default group is allowed above, + // or IOException will be thrown + return; + } + + /* For online mode, persist to hbase:rsgroup and Zookeeper */ + flushConfigTable(newGroupMap); + + // Make changes visible after having been persisted to the source of 
truth + resetRSGroupMap(newGroupMap); + saveRSGroupMapToZK(newGroupMap); + updateCacheOfRSGroups(newGroupMap.keySet()); + } + + private void saveRSGroupMapToZK(Map newGroupMap) throws IOException { + try { + String groupBasePath = + ZNodePaths.joinZNode(watcher.getZNodePaths().baseZNode, RS_GROUP_ZNODE); + ZKUtil.createAndFailSilent(watcher, groupBasePath, ProtobufMagic.PB_MAGIC); + + List zkOps = new ArrayList<>(newGroupMap.size()); + for (String groupName : prevRSGroups) { + if (!newGroupMap.containsKey(groupName)) { + String znode = ZNodePaths.joinZNode(groupBasePath, groupName); + zkOps.add(ZKUtil.ZKUtilOp.deleteNodeFailSilent(znode)); + } + } + + for (RSGroupInfo gi : newGroupMap.values()) { + if (!gi.getName().equals(RSGroupInfo.DEFAULT_GROUP)) { + String znode = ZNodePaths.joinZNode(groupBasePath, gi.getName()); + RSGroupProtos.RSGroupInfo proto = ProtobufUtil.toProtoGroupInfo(gi); + LOG.debug("Updating znode: " + znode); + ZKUtil.createAndFailSilent(watcher, znode); + zkOps.add(ZKUtil.ZKUtilOp.deleteNodeFailSilent(znode)); + zkOps.add(ZKUtil.ZKUtilOp.createAndFailSilent(znode, + ProtobufUtil.prependPBMagic(proto.toByteArray()))); + } + } + LOG.debug("Writing ZK GroupInfo count: " + zkOps.size()); + + ZKUtil.multiOrSequential(watcher, zkOps, false); + } catch (KeeperException e) { + LOG.error("Failed to write to rsGroupZNode", e); + masterServices.abort("Failed to write to rsGroupZNode", e); + throw new IOException("Failed to write to rsGroupZNode", e); + } + } + + /** + * Make changes visible. Caller must be synchronized on 'this'. + */ + private void resetRSGroupMap(Map newRSGroupMap) { + this.holder = new RSGroupInfoHolder(newRSGroupMap); + } + + /** + * Update cache of rsgroups. Caller must be synchronized on 'this'. + * @param currentGroups Current list of Groups. + */ + private void updateCacheOfRSGroups(final Set currentGroups) { + this.prevRSGroups.clear(); + this.prevRSGroups.addAll(currentGroups); + } + + // Called by getDefaultServers. 
Presume it has lock in place. + private List getOnlineRS() throws IOException { + if (masterServices != null) { + return masterServices.getServerManager().getOnlineServersList(); + } + LOG.debug("Reading online RS from zookeeper"); + List servers = new ArrayList<>(); + try { + for (String el : ZKUtil.listChildrenNoWatch(watcher, watcher.getZNodePaths().rsZNode)) { + servers.add(ServerName.parseServerName(el)); + } + } catch (KeeperException e) { + throw new IOException("Failed to retrieve server list from zookeeper", e); + } + return servers; + } + + // Called by ServerEventsListenerThread. Presume it has lock on this manager when it runs. + private SortedSet
getDefaultServers() throws IOException { + // Build a list of servers in other groups than default group, from rsGroupMap + Set
serversInOtherGroup = new HashSet<>(); + for (RSGroupInfo group : listRSGroups() /* get from rsGroupMap */) { + if (!RSGroupInfo.DEFAULT_GROUP.equals(group.getName())) { // not default group + serversInOtherGroup.addAll(group.getServers()); + } + } + + // Get all online servers from Zookeeper and find out servers in default group + SortedSet
defaultServers = Sets.newTreeSet(); + for (ServerName serverName : getOnlineRS()) { + Address server = Address.fromParts(serverName.getHostname(), serverName.getPort()); + if (!serversInOtherGroup.contains(server)) { // not in other groups + defaultServers.add(server); + } + } + return defaultServers; + } + + // Called by ServerEventsListenerThread. Synchronize on this because redoing + // the rsGroupMap then writing it out. + private synchronized void updateDefaultServers(SortedSet
servers) { + Map rsGroupMap = holder.groupName2Group; + RSGroupInfo info = rsGroupMap.get(RSGroupInfo.DEFAULT_GROUP); + RSGroupInfo newInfo = new RSGroupInfo(info.getName(), servers); + HashMap newGroupMap = Maps.newHashMap(rsGroupMap); + newGroupMap.put(newInfo.getName(), newInfo); + resetRSGroupMap(newGroupMap); + } + + /** + * Calls {@link RSGroupInfoManagerImpl#updateDefaultServers(SortedSet)} to update list of known + * servers. Notifications about server changes are received by registering {@link ServerListener}. + * As a listener, we need to return immediately, so the real work of updating the servers is done + * asynchronously in this thread. + */ + private class ServerEventsListenerThread extends Thread implements ServerListener { + private final Logger LOG = LoggerFactory.getLogger(ServerEventsListenerThread.class); + private boolean changed = false; + + ServerEventsListenerThread() { + setDaemon(true); + } + + @Override + public void serverAdded(ServerName serverName) { + serverChanged(); + } + + @Override + public void serverRemoved(ServerName serverName) { + serverChanged(); + } + + private synchronized void serverChanged() { + changed = true; + this.notify(); + } + + @Override + public void run() { + setName(ServerEventsListenerThread.class.getName() + "-" + masterServices.getServerName()); + SortedSet
prevDefaultServers = new TreeSet<>(); + while (isMasterRunning(masterServices)) { + try { + LOG.info("Updating default servers."); + SortedSet
servers = RSGroupInfoManagerImpl.this.getDefaultServers(); + if (!servers.equals(prevDefaultServers)) { + RSGroupInfoManagerImpl.this.updateDefaultServers(servers); + prevDefaultServers = servers; + LOG.info("Updated with servers: " + servers.size()); + } + try { + synchronized (this) { + while (!changed) { + wait(); + } + changed = false; + } + } catch (InterruptedException e) { + LOG.warn("Interrupted", e); + } + } catch (IOException e) { + LOG.warn("Failed to update default servers", e); + } + } + } + } + + private class RSGroupStartupWorker extends Thread { + private final Logger LOG = LoggerFactory.getLogger(RSGroupStartupWorker.class); + private volatile boolean online = false; + + RSGroupStartupWorker() { + super(RSGroupStartupWorker.class.getName() + "-" + masterServices.getServerName()); + setDaemon(true); + } + + @Override + public void run() { + if (waitForGroupTableOnline()) { + LOG.info("GroupBasedLoadBalancer is now online"); + } else { + LOG.warn("Quit without making region group table online"); + } + } + + private boolean waitForGroupTableOnline() { + while (isMasterRunning(masterServices)) { + try { + TableStateManager tsm = masterServices.getTableStateManager(); + if (!tsm.isTablePresent(RSGROUP_TABLE_NAME)) { + createRSGroupTable(); + } + // try reading from the table + FutureUtils.get(conn.getTable(RSGROUP_TABLE_NAME).get(new Get(ROW_KEY))); + LOG.info("RSGroup table={} is online, refreshing cached information", RSGROUP_TABLE_NAME); + RSGroupInfoManagerImpl.this.refresh(true); + online = true; + // flush any inconsistencies between ZK and HTable + RSGroupInfoManagerImpl.this.flushConfig(); + // migrate after we are online. 
+ migrate(); + return true; + } catch (Exception e) { + LOG.warn("Failed to perform check", e); + // 100ms is short so let's just ignore the interrupt + Threads.sleepWithoutInterrupt(100); + } + } + return false; + } + + private void createRSGroupTable() throws IOException { + OptionalLong optProcId = masterServices.getProcedures().stream() + .filter(p -> p instanceof CreateTableProcedure).map(p -> (CreateTableProcedure) p) + .filter(p -> p.getTableName().equals(RSGROUP_TABLE_NAME)).mapToLong(Procedure::getProcId) + .findFirst(); + long procId; + if (optProcId.isPresent()) { + procId = optProcId.getAsLong(); + } else { + procId = masterServices.createSystemTable(RSGROUP_TABLE_DESC); + } + // wait for region to be online + int tries = 600; + while (!(masterServices.getMasterProcedureExecutor().isFinished(procId)) && + masterServices.getMasterProcedureExecutor().isRunning() && tries > 0) { + try { + Thread.sleep(100); + } catch (InterruptedException e) { + throw new IOException("Wait interrupted ", e); + } + tries--; + } + if (tries <= 0) { + throw new IOException("Failed to create group table in a given time."); + } else { + Procedure result = masterServices.getMasterProcedureExecutor().getResult(procId); + if (result != null && result.isFailed()) { + throw new IOException("Failed to create group table. 
" + + MasterProcedureUtil.unwrapRemoteIOException(result)); + } + } + } + + public boolean isOnline() { + return online; + } + } + + private static boolean isMasterRunning(MasterServices masterServices) { + return !masterServices.isAborted() && !masterServices.isStopped(); + } + + private void multiMutate(List mutations) throws IOException { + MutateRowsRequest.Builder builder = MutateRowsRequest.newBuilder(); + for (Mutation mutation : mutations) { + if (mutation instanceof Put) { + builder + .addMutationRequest(ProtobufUtil.toMutation(MutationProto.MutationType.PUT, mutation)); + } else if (mutation instanceof Delete) { + builder.addMutationRequest( + ProtobufUtil.toMutation(MutationProto.MutationType.DELETE, mutation)); + } else { + throw new DoNotRetryIOException( + "multiMutate doesn't support " + mutation.getClass().getName()); + } + } + MutateRowsRequest request = builder.build(); + AsyncTable table = conn.getTable(RSGROUP_TABLE_NAME); + FutureUtils.get(table. coprocessorService( + MultiRowMutationService::newStub, + (stub, controller, done) -> stub.mutateRows(controller, request, done), ROW_KEY)); + } + + private void checkGroupName(String groupName) throws ConstraintException { + if (!groupName.matches("[a-zA-Z0-9_]+")) { + throw new ConstraintException("RSGroup name should only contain alphanumeric characters"); + } + } + + @Override + public RSGroupInfo getRSGroupForTable(TableName tableName) throws IOException { + return holder.tableName2Group.get(tableName); + } + + + /** + * Check if the set of servers are belong to dead servers list or online servers list. + * @param servers servers to remove + */ + private void checkForDeadOrOnlineServers(Set
servers) throws IOException { + // This uglyness is because we only have Address, not ServerName. + Set
onlineServers = new HashSet<>(); + List drainingServers = masterServices.getServerManager().getDrainingServersList(); + for (ServerName server : masterServices.getServerManager().getOnlineServers().keySet()) { + // Only online but not decommissioned servers are really online + if (!drainingServers.contains(server)) { + onlineServers.add(server.getAddress()); + } + } + + Set
deadServers = new HashSet<>(); + for(ServerName server: masterServices.getServerManager().getDeadServers().copyServerNames()) { + deadServers.add(server.getAddress()); + } + + for (Address address: servers) { + if (onlineServers.contains(address)) { + throw new DoNotRetryIOException( + "Server " + address + " is an online server, not allowed to remove."); + } + if (deadServers.contains(address)) { + throw new DoNotRetryIOException( + "Server " + address + " is on the dead servers list," + + " Maybe it will come back again, not allowed to remove."); + } + } + } + + private void checkOnlineServersOnly(Set
servers) throws IOException { + // This uglyness is because we only have Address, not ServerName. + // Online servers are keyed by ServerName. + Set
onlineServers = new HashSet<>(); + for(ServerName server: masterServices.getServerManager().getOnlineServers().keySet()) { + onlineServers.add(server.getAddress()); + } + for (Address address: servers) { + if (!onlineServers.contains(address)) { + throw new DoNotRetryIOException("Server " + address + + " is not an online server in 'default' RSGroup."); + } + } + } + + /** + * @return List of Regions associated with this server. + */ + private List getRegions(final Address server) { + LinkedList regions = new LinkedList<>(); + for (Map.Entry el : + masterServices.getAssignmentManager().getRegionStates().getRegionAssignments().entrySet()) { + if (el.getValue() == null) { + continue; + } + + if (el.getValue().getAddress().equals(server)) { + addRegion(regions, el.getKey()); + } + } + for (RegionStateNode state : masterServices.getAssignmentManager().getRegionsInTransition()) { + if (state.getRegionLocation() != null && + state.getRegionLocation().getAddress().equals(server)) { + addRegion(regions, state.getRegionInfo()); + } + } + return regions; + } + + private void addRegion(final LinkedList regions, RegionInfo hri) { + // If meta, move it last otherwise other unassigns fail because meta is not + // online for them to update state in. This is dodgy. Needs to be made more + // robust. See TODO below. + if (hri.isMetaRegion()) { + regions.addLast(hri); + } else { + regions.addFirst(hri); + } + } + + /** + * Move every region from servers which are currently located on these servers, but should not be + * located there. + * @param servers the servers that will move to new group + * @param targetGroupName the target group name + * @throws IOException if moving the server and tables fail + */ + private void moveServerRegionsFromGroup(Set
servers, String targetGroupName) + throws IOException { + moveRegionsBetweenGroups(servers, targetGroupName, rs -> getRegions(rs), info -> { + try { + String groupName = RSGroupUtil.getRSGroupInfo(masterServices, this, info.getTable()) + .map(RSGroupInfo::getName).orElse(RSGroupInfo.DEFAULT_GROUP); + return groupName.equals(targetGroupName); + } catch (IOException e) { + LOG.warn("Failed to test group for region {} and target group {}", info, targetGroupName); + return false; + } + }, rs -> rs.getHostname()); + } + + private void moveRegionsBetweenGroups(Set regionsOwners, String targetGroupName, + Function> getRegionsInfo, Function validation, + Function getOwnerName) throws IOException { + boolean hasRegionsToMove; + int retry = 0; + Set allOwners = new HashSet<>(regionsOwners); + Set failedRegions = new HashSet<>(); + IOException toThrow = null; + do { + hasRegionsToMove = false; + for (Iterator iter = allOwners.iterator(); iter.hasNext(); ) { + T owner = iter.next(); + // Get regions that are associated with this server and filter regions by group tables. + for (RegionInfo region : getRegionsInfo.apply(owner)) { + if (!validation.apply(region)) { + LOG.info("Moving region {}, which do not belong to RSGroup {}", + region.getShortNameToLog(), targetGroupName); + try { + this.masterServices.getAssignmentManager().move(region); + failedRegions.remove(region.getRegionNameAsString()); + } catch (IOException ioe) { + LOG.debug("Move region {} from group failed, will retry, current retry time is {}", + region.getShortNameToLog(), retry, ioe); + toThrow = ioe; + failedRegions.add(region.getRegionNameAsString()); + } + if (masterServices.getAssignmentManager().getRegionStates(). 
+ getRegionState(region).isFailedOpen()) { + continue; + } + hasRegionsToMove = true; + } + } + + if (!hasRegionsToMove) { + LOG.info("No more regions to move from {} to RSGroup", getOwnerName.apply(owner)); + iter.remove(); + } + } + + retry++; + try { + wait(1000); + } catch (InterruptedException e) { + LOG.warn("Sleep interrupted", e); + Thread.currentThread().interrupt(); + } + } while (hasRegionsToMove && retry <= + masterServices.getConfiguration().getInt(FAILED_MOVE_MAX_RETRY, DEFAULT_MAX_RETRY_VALUE)); + + //has up to max retry time or there are no more regions to move + if (hasRegionsToMove) { + // print failed moved regions, for later process conveniently + String msg = String + .format("move regions for group %s failed, failed regions: %s", targetGroupName, + failedRegions); + LOG.error(msg); + throw new DoNotRetryIOException( + msg + ", just record the last failed region's cause, more details in server log", + toThrow); + } + } + + private boolean isTableInGroup(TableName tableName, String groupName, + Set tablesInGroupCache) throws IOException { + if (tablesInGroupCache.contains(tableName)) { + return true; + } + if (RSGroupUtil.getRSGroupInfo(masterServices, this, tableName).map(RSGroupInfo::getName) + .orElse(RSGroupInfo.DEFAULT_GROUP).equals(groupName)) { + tablesInGroupCache.add(tableName); + return true; + } + return false; + } + + private Map rsGroupGetRegionsInTransition(String groupName) + throws IOException { + Map rit = Maps.newTreeMap(); + Set tablesInGroupCache = new HashSet<>(); + for (RegionStateNode regionNode : masterServices.getAssignmentManager().getRegionsInTransition()) { + TableName tn = regionNode.getTable(); + if (isTableInGroup(tn, groupName, tablesInGroupCache)) { + rit.put(regionNode.getRegionInfo().getEncodedName(), regionNode.toRegionState()); + } + } + return rit; + } + + private Map>> + getRSGroupAssignmentsByTable(String groupName) throws IOException { + Map>> result = Maps.newHashMap(); + Set tablesInGroupCache = new 
HashSet<>(); + for (Map.Entry entry : masterServices.getAssignmentManager().getRegionStates() + .getRegionAssignments().entrySet()) { + RegionInfo region = entry.getKey(); + TableName tn = region.getTable(); + ServerName server = entry.getValue(); + if (isTableInGroup(tn, groupName, tablesInGroupCache)) { + result.computeIfAbsent(tn, k -> new HashMap<>()) + .computeIfAbsent(server, k -> new ArrayList<>()).add(region); + } + } + RSGroupInfo rsGroupInfo = getRSGroupInfo(groupName); + for (ServerName serverName : masterServices.getServerManager().getOnlineServers().keySet()) { + if (rsGroupInfo.containsServer(serverName.getAddress())) { + for (Map> map : result.values()) { + map.computeIfAbsent(serverName, k -> Collections.emptyList()); + } + } + } + + return result; + } + + @Override + public boolean balanceRSGroup(String groupName) throws IOException { + ServerManager serverManager = masterServices.getServerManager(); + LoadBalancer balancer = masterServices.getLoadBalancer(); + getRSGroupInfo(groupName); + + synchronized (balancer) { + // If balance not true, don't run balancer. + if (!masterServices.isBalancerOn()) { + return false; + } + // Only allow one balance run at at time. 
+ Map groupRIT = rsGroupGetRegionsInTransition(groupName); + if (groupRIT.size() > 0) { + LOG.debug("Not running balancer because {} region(s) in transition: {}", groupRIT.size(), + StringUtils.abbreviate( + masterServices.getAssignmentManager().getRegionStates().getRegionsInTransition().toString(), + 256)); + return false; + } + if (serverManager.areDeadServersInProgress()) { + LOG.debug("Not running balancer because processing dead regionserver(s): {}", + serverManager.getDeadServers()); + return false; + } + + // We balance per group instead of per table + List plans = new ArrayList<>(); + Map>> assignmentsByTable = + getRSGroupAssignmentsByTable(groupName); + for (Map.Entry>> tableMap : assignmentsByTable + .entrySet()) { + LOG.info("Creating partial plan for table {} : {}", tableMap.getKey(), tableMap.getValue()); + List partialPlans = balancer.balanceCluster(tableMap.getValue()); + LOG.info("Partial plan for table {} : {}", tableMap.getKey(), partialPlans); + if (partialPlans != null) { + plans.addAll(partialPlans); + } + } + boolean balancerRan = !plans.isEmpty(); + if (balancerRan) { + LOG.info("RSGroup balance {} starting with plan count: {}", groupName, plans.size()); + masterServices.executeRegionPlansWithThrottling(plans); + LOG.info("RSGroup balance " + groupName + " completed"); + } + return balancerRan; + } + } + + private void moveTablesAndWait(Set tables, String targetGroup) throws IOException { + List procIds = new ArrayList(); + for (TableName tableName : tables) { + TableDescriptor oldTd = masterServices.getTableDescriptors().get(tableName); + if (oldTd == null) { + continue; + } + TableDescriptor newTd = + TableDescriptorBuilder.newBuilder(oldTd).setRegionServerGroup(targetGroup).build(); + procIds.add(masterServices.modifyTable(tableName, newTd, HConstants.NO_NONCE, HConstants.NO_NONCE)); + } + for (long procId : procIds) { + Procedure proc = masterServices.getMasterProcedureExecutor().getProcedure(procId); + if (proc == null) { + continue; + 
} + ProcedureSyncWait.waitForProcedureToCompleteIOE(masterServices.getMasterProcedureExecutor(), proc, + Long.MAX_VALUE); + } + } + + @Override + public void setRSGroup(Set tables, String groupName) throws IOException { + getRSGroupInfo(groupName); + moveTablesAndWait(tables, groupName); + } + + public void moveServers(Set
servers, String targetGroupName) throws IOException { + if (servers == null) { + throw new ConstraintException("The list of servers to move cannot be null."); + } + if (servers.isEmpty()) { + // For some reason this difference between null servers and isEmpty is important distinction. + // TODO. Why? Stuff breaks if I equate them. + return; + } + if (StringUtils.isEmpty(targetGroupName)) { + throw new ConstraintException("RSGroup cannot be null."); + } + getRSGroupInfo(targetGroupName); + + // Hold a lock on the manager instance while moving servers to prevent + // another writer changing our state while we are working. + synchronized (this) { + // Presume first server's source group. Later ensure all servers are from this group. + Address firstServer = servers.iterator().next(); + RSGroupInfo srcGrp = getRSGroupOfServer(firstServer); + if (srcGrp == null) { + // Be careful. This exception message is tested for in TestRSGroupsBase... + throw new ConstraintException("Source RSGroup for server " + firstServer + + " does not exist."); + } + + // Only move online servers (when moving from 'default') or servers from other + // groups. This prevents bogus servers from entering groups + if (RSGroupInfo.DEFAULT_GROUP.equals(srcGrp.getName())) { + if (srcGrp.getServers().size() <= servers.size()) { + throw new ConstraintException(KEEP_ONE_SERVER_IN_DEFAULT_ERROR_MESSAGE); + } + checkOnlineServersOnly(servers); + } + // Ensure all servers are of same rsgroup. + for (Address server: servers) { + String tmpGroup = getRSGroupOfServer(server).getName(); + if (!tmpGroup.equals(srcGrp.getName())) { + throw new ConstraintException("Move server request should only come from one source " + + "RSGroup. 
Expecting only " + srcGrp.getName() + " but contains " + tmpGroup); + } + } + if (srcGrp.getServers().size() <= servers.size()) { + // check if there are still tables reference this group + for (TableDescriptor td : masterServices.getTableDescriptors().getAll().values()) { + Optional optGroupName = td.getRegionServerGroup(); + if (optGroupName.isPresent() && optGroupName.get().equals(srcGrp.getName())) { + throw new ConstraintException( + "Cannot leave a RSGroup " + srcGrp.getName() + " that contains tables('" + + td.getTableName() + "' at least) without servers to host them."); + } + } + } + + // MovedServers may be < passed in 'servers'. + Set
movedServers = moveServers(servers, srcGrp.getName(), + targetGroupName); + moveServerRegionsFromGroup(movedServers, targetGroupName); + LOG.info("Move servers done: {} => {}", srcGrp.getName(), targetGroupName); + } + } + +} diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupMajorCompactionTTL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupMajorCompactionTTL.java similarity index 97% rename from hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupMajorCompactionTTL.java rename to hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupMajorCompactionTTL.java index d1b375181ccb..a77d6b0588de 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupMajorCompactionTTL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupMajorCompactionTTL.java @@ -21,6 +21,7 @@ import java.util.Arrays; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.TableName; @@ -58,9 +59,9 @@ public int compactTTLRegionsOnGroup(Configuration conf, String rsgroup, int conc throws Exception { Connection conn = ConnectionFactory.createConnection(conf); - RSGroupAdmin rsGroupAdmin = new RSGroupAdminClient(conn); + Admin admin = conn.getAdmin(); - RSGroupInfo rsGroupInfo = rsGroupAdmin.getRSGroupInfo(rsgroup); + RSGroupInfo rsGroupInfo = admin.getRSGroupInfo(rsgroup); if (rsGroupInfo == null) { LOG.error("Invalid rsgroup specified: " + rsgroup); throw new IllegalArgumentException("Invalid rsgroup specified: " + rsgroup); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupUtil.java new file mode 100644 index 000000000000..1e6799cda0ab --- /dev/null +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupUtil.java @@ -0,0 +1,100 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable + * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under the License. + */ +package org.apache.hadoop.hbase.rsgroup; + +import java.io.IOException; +import java.util.Collection; +import java.util.Optional; +import java.util.function.Predicate; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.master.ClusterSchema; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Helper class for RSGroup implementation + */ +@InterfaceAudience.Private +public final class RSGroupUtil { + + private static final Logger LOG = LoggerFactory.getLogger(RSGroupUtil.class); + + private RSGroupUtil() { + } + + /** + * Will try to get the rsgroup from {@link TableDescriptor} first, and then try to get the rsgroup + * from the {@link NamespaceDescriptor}. If still not present, return empty. 
+ */ + public static Optional getRSGroupInfo(MasterServices master, RSGroupInfoManager manager, + TableName tableName) throws IOException { + TableDescriptor td = master.getTableDescriptors().get(tableName); + if (td == null) { + return Optional.empty(); + } + Optional optGroupNameOfTable = td.getRegionServerGroup(); + if (optGroupNameOfTable.isPresent()) { + RSGroupInfo group = manager.getRSGroup(optGroupNameOfTable.get()); + if (group != null) { + return Optional.of(group); + } + } + // for backward compatible, where we may still have table configs in the RSGroupInfo after + // upgrading when migrating is still on-going. + RSGroupInfo groupFromOldRSGroupInfo = manager.getRSGroupForTable(tableName); + if (groupFromOldRSGroupInfo != null) { + return Optional.of(groupFromOldRSGroupInfo); + } + ClusterSchema clusterSchema = master.getClusterSchema(); + if (clusterSchema == null) { + if (TableName.isMetaTableName(tableName)) { + LOG.info("Can not get the namespace rs group config for meta table, since the" + + " meta table is not online yet, will use default group to assign meta first"); + } else { + LOG.warn("ClusterSchema is null, can only use default rsgroup, should not happen?"); + } + return Optional.empty(); + } + NamespaceDescriptor nd = clusterSchema.getNamespace(tableName.getNamespaceAsString()); + String groupNameOfNs = nd.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP); + if (groupNameOfNs == null) { + return Optional.empty(); + } + return Optional.ofNullable(manager.getRSGroup(groupNameOfNs)); + } + + /** + * Fill the tables field for {@link RSGroupInfo}, for backward compatibility. 
+ */ + @SuppressWarnings("deprecation") + public static RSGroupInfo fillTables(RSGroupInfo rsGroupInfo, Collection tds) { + RSGroupInfo newRsGroupInfo = new RSGroupInfo(rsGroupInfo); + Predicate filter; + if (rsGroupInfo.getName().equals(RSGroupInfo.DEFAULT_GROUP)) { + filter = td -> { + Optional optGroupName = td.getRegionServerGroup(); + return !optGroupName.isPresent() || optGroupName.get().equals(RSGroupInfo.DEFAULT_GROUP); + }; + } else { + filter = td -> { + Optional optGroupName = td.getRegionServerGroup(); + return optGroupName.isPresent() && optGroupName.get().equals(newRsGroupInfo.getName()); + }; + } + tds.stream().filter(filter).map(TableDescriptor::getTableName) + .forEach(newRsGroupInfo::addTable); + return newRsGroupInfo; + } +} diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupableBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupableBalancer.java similarity index 100% rename from hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupableBalancer.java rename to hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupableBalancer.java diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java index cbfdd3f74497..a095d900b429 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java @@ -54,6 +54,7 @@ import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; import org.apache.hadoop.hbase.replication.SyncReplicationState; +import org.apache.hadoop.hbase.rsgroup.RSGroupInfoManager; import org.apache.hadoop.hbase.security.access.AccessChecker; import org.apache.hadoop.hbase.security.access.ZKPermissionWatcher; import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher; @@ -490,4 +491,14 @@ public List executeRegionPlansWithThrottling(List plans) public AsyncClusterConnection getAsyncClusterConnection() { return null; } + + @Override + public RSGroupInfoManager getRSRSGroupInfoManager() { + return null; + } + + @Override + public boolean isBalancerOn() { + return false; + } } \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement2.java index 6dc371149a71..47337f9f7c18 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement2.java @@ -19,6 +19,7 @@ import static org.junit.Assert.assertTrue; +import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.List; @@ -26,7 +27,6 @@ import java.util.Set; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; @@ -82,7 +82,7 @@ public static void tearDownAfterClass() throws Exception { } @Test - public void testFavoredNodesPresentForRoundRobinAssignment() throws HBaseIOException { + public void testFavoredNodesPresentForRoundRobinAssignment() throws IOException { LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(TEST_UTIL.getConfiguration()); balancer.setMasterServices(TEST_UTIL.getMiniHBaseCluster().getMaster()); balancer.initialize(); @@ -143,7 +143,7 @@ public void testFavoredNodesPresentForRoundRobinAssignment() throws HBaseIOExcep } @Test - public void testFavoredNodesPresentForRandomAssignment() throws HBaseIOException { + public void testFavoredNodesPresentForRandomAssignment() throws IOException 
{ LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(TEST_UTIL.getConfiguration()); balancer.setMasterServices(TEST_UTIL.getMiniHBaseCluster().getMaster()); balancer.initialize(); diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/RSGroupableBalancerTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/RSGroupableBalancerTestBase.java similarity index 85% rename from hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/RSGroupableBalancerTestBase.java rename to hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/RSGroupableBalancerTestBase.java index 570bb3abb3e9..4c00bcfcd0fa 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/RSGroupableBalancerTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/RSGroupableBalancerTestBase.java @@ -28,6 +28,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.TreeMap; import java.util.TreeSet; @@ -60,17 +61,13 @@ public class RSGroupableBalancerTestBase { static SecureRandom rand = new SecureRandom(); - static String[] groups = new String[] {RSGroupInfo.DEFAULT_GROUP, "dg2", "dg3", "dg4"}; + static String[] groups = new String[] { RSGroupInfo.DEFAULT_GROUP, "dg2", "dg3", "dg4" }; static TableName table0 = TableName.valueOf("dt0"); - static TableName[] tables = - new TableName[] { TableName.valueOf("dt1"), - TableName.valueOf("dt2"), - TableName.valueOf("dt3"), - TableName.valueOf("dt4")}; + static TableName[] tables = new TableName[] { TableName.valueOf("dt1"), TableName.valueOf("dt2"), + TableName.valueOf("dt3"), TableName.valueOf("dt4") }; static List servers; static Map groupMap; - static Map tableMap = new HashMap<>(); - static List tableDescs; + static Map tableDescs; int[] regionAssignment = new int[] { 2, 5, 7, 10, 4, 3, 1 }; static int regionId = 0; @@ -113,20 +110,19 @@ 
protected void assertClusterAsBalanced( /** * All regions have an assignment. */ - protected void assertImmediateAssignment(List regions, - List servers, - Map assignments) - throws IOException { + protected void assertImmediateAssignment(List regions, List servers, + Map assignments) throws IOException { for (RegionInfo region : regions) { assertTrue(assignments.containsKey(region)); ServerName server = assignments.get(region); TableName tableName = region.getTable(); - String groupName = getMockedGroupInfoManager().getRSGroupOfTable(tableName); + String groupName = + tableDescs.get(tableName).getRegionServerGroup().orElse(RSGroupInfo.DEFAULT_GROUP); assertTrue(StringUtils.isNotEmpty(groupName)); RSGroupInfo gInfo = getMockedGroupInfoManager().getRSGroup(groupName); assertTrue("Region is not correctly assigned to group servers.", - gInfo.containsServer(server.getAddress())); + gInfo.containsServer(server.getAddress())); } } @@ -169,16 +165,13 @@ protected void assertRetainedAssignment( ServerName oldAssignedServer = existing.get(r); TableName tableName = r.getTable(); String groupName = - getMockedGroupInfoManager().getRSGroupOfTable(tableName); + tableDescs.get(tableName).getRegionServerGroup().orElse(RSGroupInfo.DEFAULT_GROUP); assertTrue(StringUtils.isNotEmpty(groupName)); - RSGroupInfo gInfo = getMockedGroupInfoManager().getRSGroup( - groupName); - assertTrue( - "Region is not correctly assigned to group servers.", - gInfo.containsServer(currentServer.getAddress())); - if (oldAssignedServer != null - && onlineHostNames.contains(oldAssignedServer - .getHostname())) { + RSGroupInfo gInfo = getMockedGroupInfoManager().getRSGroup(groupName); + assertTrue("Region is not correctly assigned to group servers.", + gInfo.containsServer(currentServer.getAddress())); + if (oldAssignedServer != null && + onlineHostNames.contains(oldAssignedServer.getHostname())) { // this region was previously assigned somewhere, and that // host is still around, then the host must have 
been is a // different group. @@ -358,13 +351,12 @@ protected static List generateServers(int numServers) { /** * Construct group info, with each group having at least one server. - * * @param servers the servers * @param groups the groups * @return the map */ - protected static Map constructGroupInfo( - List servers, String[] groups) { + protected static Map constructGroupInfo(List servers, + String[] groups) { assertTrue(servers != null); assertTrue(servers.size() >= groups.length); int index = 0; @@ -377,8 +369,7 @@ protected static Map constructGroupInfo( } while (index < servers.size()) { int grpIndex = rand.nextInt(groups.length); - groupMap.get(groups[grpIndex]).addServer( - servers.get(index).getAddress()); + groupMap.get(groups[grpIndex]).addServer(servers.get(index).getAddress()); index++; } return groupMap; @@ -389,29 +380,28 @@ protected static Map constructGroupInfo( * @param hasBogusTable there is a table that does not determine the group * @return the list of table descriptors */ - protected static List constructTableDesc(boolean hasBogusTable) { - List tds = Lists.newArrayList(); + protected static Map constructTableDesc(boolean hasBogusTable) { + Map tds = new HashMap<>(); int index = rand.nextInt(groups.length); for (int i = 0; i < tables.length; i++) { - TableDescriptor htd = TableDescriptorBuilder.newBuilder(tables[i]).build(); int grpIndex = (i + index) % groups.length; String groupName = groups[grpIndex]; - tableMap.put(tables[i], groupName); - tds.add(htd); + TableDescriptor htd = + TableDescriptorBuilder.newBuilder(tables[i]).setRegionServerGroup(groupName).build(); + tds.put(htd.getTableName(), htd); } if (hasBogusTable) { - tableMap.put(table0, ""); - tds.add(TableDescriptorBuilder.newBuilder(table0).build()); + tds.put(table0, TableDescriptorBuilder.newBuilder(table0).setRegionServerGroup("").build()); } return tds; } protected static MasterServices getMockedMaster() throws IOException { TableDescriptors tds = 
Mockito.mock(TableDescriptors.class); - Mockito.when(tds.get(tables[0])).thenReturn(tableDescs.get(0)); - Mockito.when(tds.get(tables[1])).thenReturn(tableDescs.get(1)); - Mockito.when(tds.get(tables[2])).thenReturn(tableDescs.get(2)); - Mockito.when(tds.get(tables[3])).thenReturn(tableDescs.get(3)); + Mockito.when(tds.get(tables[0])).thenReturn(tableDescs.get(tables[0])); + Mockito.when(tds.get(tables[1])).thenReturn(tableDescs.get(tables[1])); + Mockito.when(tds.get(tables[2])).thenReturn(tableDescs.get(tables[2])); + Mockito.when(tds.get(tables[3])).thenReturn(tableDescs.get(tables[3])); MasterServices services = Mockito.mock(HMaster.class); Mockito.when(services.getTableDescriptors()).thenReturn(tds); AssignmentManager am = Mockito.mock(AssignmentManager.class); @@ -430,13 +420,6 @@ public RSGroupInfo answer(InvocationOnMock invocation) throws Throwable { Mockito.when(gm.listRSGroups()).thenReturn( Lists.newLinkedList(groupMap.values())); Mockito.when(gm.isOnline()).thenReturn(true); - Mockito.when(gm.getRSGroupOfTable(Mockito.any())) - .thenAnswer(new Answer() { - @Override - public String answer(InvocationOnMock invocation) throws Throwable { - return tableMap.get(invocation.getArgument(0)); - } - }); return gm; } @@ -444,15 +427,16 @@ protected TableName getTableName(ServerName sn) throws IOException { TableName tableName = null; RSGroupInfoManager gm = getMockedGroupInfoManager(); RSGroupInfo groupOfServer = null; - for(RSGroupInfo gInfo : gm.listRSGroups()){ - if(gInfo.containsServer(sn.getAddress())){ + for (RSGroupInfo gInfo : gm.listRSGroups()) { + if (gInfo.containsServer(sn.getAddress())) { groupOfServer = gInfo; break; } } - for(TableDescriptor desc : tableDescs){ - if(gm.getRSGroupOfTable(desc.getTableName()).endsWith(groupOfServer.getName())){ + for (TableDescriptor desc : tableDescs.values()) { + Optional optGroupName = desc.getRegionServerGroup(); + if (optGroupName.isPresent() && optGroupName.get().endsWith(groupOfServer.getName())) { tableName 
= desc.getTableName(); } } diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java similarity index 87% rename from hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java rename to hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java index b60ca7ea2995..b2ea28b47cb2 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java @@ -98,33 +98,30 @@ public void testBalanceCluster() throws Exception { /** * Tests the bulk assignment used during cluster startup. - * - * Round-robin. Should yield a balanced cluster so same invariant as the - * load balancer holds, all servers holding either floor(avg) or - * ceiling(avg). + *

+ * Round-robin. Should yield a balanced cluster so same invariant as the load balancer holds, all + * servers holding either floor(avg) or ceiling(avg). */ @Test public void testBulkAssignment() throws Exception { List regions = randomRegions(25); - Map> assignments = loadBalancer - .roundRobinAssignment(regions, servers); - //test empty region/servers scenario - //this should not throw an NPE + Map> assignments = + loadBalancer.roundRobinAssignment(regions, servers); + // test empty region/servers scenario + // this should not throw an NPE loadBalancer.roundRobinAssignment(regions, Collections.emptyList()); - //test regular scenario + // test regular scenario assertTrue(assignments.keySet().size() == servers.size()); for (ServerName sn : assignments.keySet()) { List regionAssigned = assignments.get(sn); for (RegionInfo region : regionAssigned) { TableName tableName = region.getTable(); String groupName = - getMockedGroupInfoManager().getRSGroupOfTable(tableName); + tableDescs.get(tableName).getRegionServerGroup().orElse(RSGroupInfo.DEFAULT_GROUP); assertTrue(StringUtils.isNotEmpty(groupName)); - RSGroupInfo gInfo = getMockedGroupInfoManager().getRSGroup( - groupName); - assertTrue( - "Region is not correctly assigned to group servers.", - gInfo.containsServer(sn.getAddress())); + RSGroupInfo gInfo = getMockedGroupInfoManager().getRSGroup(groupName); + assertTrue("Region is not correctly assigned to group servers.", + gInfo.containsServer(sn.getAddress())); } } ArrayListMultimap loadMap = convertToGroupBasedMap(assignments); @@ -175,24 +172,25 @@ public void testRoundRobinAssignment() throws Exception { onlineServers.addAll(servers); List regions = randomRegions(25); int bogusRegion = 0; - for(RegionInfo region : regions){ - String group = tableMap.get(region.getTable()); - if("dg3".equals(group) || "dg4".equals(group)){ + for (RegionInfo region : regions) { + String group = tableDescs.get(region.getTable()).getRegionServerGroup() + 
.orElse(RSGroupInfo.DEFAULT_GROUP); + if ("dg3".equals(group) || "dg4".equals(group)) { bogusRegion++; } } Set

offlineServers = new HashSet
(); offlineServers.addAll(groupMap.get("dg3").getServers()); offlineServers.addAll(groupMap.get("dg4").getServers()); - for(Iterator it = onlineServers.iterator(); it.hasNext();){ + for (Iterator it = onlineServers.iterator(); it.hasNext();) { ServerName server = it.next(); Address address = server.getAddress(); - if(offlineServers.contains(address)){ + if (offlineServers.contains(address)) { it.remove(); } } - Map> assignments = loadBalancer - .roundRobinAssignment(regions, onlineServers); + Map> assignments = + loadBalancer.roundRobinAssignment(regions, onlineServers); assertEquals(bogusRegion, assignments.get(LoadBalancer.BOGUS_SERVER_NAME).size()); } } diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java similarity index 98% rename from hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java rename to hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java index e588a7e198b4..a4ae636a9a82 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java @@ -22,6 +22,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; +import java.io.IOException; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -32,7 +33,6 @@ import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; -import 
org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.RegionMetrics; import org.apache.hadoop.hbase.ServerMetrics; import org.apache.hadoop.hbase.ServerName; @@ -98,7 +98,7 @@ private ServerMetrics mockServerMetricsWithReadRequests(ServerName server, * Test HBASE-20791 */ @Test - public void testBalanceCluster() throws HBaseIOException { + public void testBalanceCluster() throws IOException { // mock cluster State Map> clusterState = new HashMap>(); ServerName serverA = servers.get(0); diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestEnableRSGroups.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestEnableRSGroups.java similarity index 100% rename from hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestEnableRSGroups.java rename to hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestEnableRSGroups.java diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestMigrateRSGroupInfo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestMigrateRSGroupInfo.java new file mode 100644 index 000000000000..a1bff14ad45d --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestMigrateRSGroupInfo.java @@ -0,0 +1,183 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rsgroup; + +import static org.apache.hadoop.hbase.rsgroup.RSGroupInfoManagerImpl.META_FAMILY_BYTES; +import static org.apache.hadoop.hbase.rsgroup.RSGroupInfoManagerImpl.META_QUALIFIER_BYTES; +import static org.apache.hadoop.hbase.rsgroup.RSGroupInfoManagerImpl.RSGROUP_TABLE_NAME; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.util.concurrent.CountDownLatch; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableDescriptors; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.zookeeper.KeeperException; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +/** + * Testcase for HBASE-22819 + */ +@RunWith(Parameterized.class) +@Category({ MediumTests.class }) +public class TestMigrateRSGroupInfo extends TestRSGroupsBase { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestMigrateRSGroupInfo.class); + + private static String TABLE_NAME_PREFIX = "Table_"; + + private 
static int NUM_TABLES = 10; + + private static byte[] FAMILY = Bytes.toBytes("family"); + + @BeforeClass + public static void setUp() throws Exception { + TEST_UTIL.getConfiguration().setClass(HConstants.MASTER_IMPL, HMasterForTest.class, + HMaster.class); + setUpTestBeforeClass(); + for (int i = 0; i < NUM_TABLES; i++) { + TEST_UTIL.createTable(TableName.valueOf(TABLE_NAME_PREFIX + i), FAMILY); + } + } + + @AfterClass + public static void tearDown() throws Exception { + tearDownAfterClass(); + } + + private static CountDownLatch RESUME = new CountDownLatch(1); + + public static final class HMasterForTest extends HMaster { + + public HMasterForTest(Configuration conf) throws IOException, KeeperException { + super(conf); + } + + @Override + public TableDescriptors getTableDescriptors() { + if (RESUME != null) { + for (StackTraceElement element : Thread.currentThread().getStackTrace()) { + if (element.getClassName().contains("RSGroupInfoManagerImpl")) { + try { + RESUME.await(); + } catch (InterruptedException e) { + } + RESUME = null; + break; + } + } + } + return super.getTableDescriptors(); + } + } + + @Test + public void testMigrate() throws IOException, InterruptedException { + setAdmin(); + String groupName = getNameWithoutIndex(name.getMethodName()); + addGroup(groupName, TEST_UTIL.getMiniHBaseCluster().getRegionServerThreads().size() - 1); + RSGroupInfo rsGroupInfo = rsGroupAdmin.getRSGroupInfo(groupName); + assertTrue(rsGroupInfo.getTables().isEmpty()); + for (int i = 0; i < NUM_TABLES; i++) { + rsGroupInfo.addTable(TableName.valueOf(TABLE_NAME_PREFIX + i)); + } + try (Table table = TEST_UTIL.getConnection().getTable(RSGROUP_TABLE_NAME)) { + RSGroupProtos.RSGroupInfo proto = ProtobufUtil.toProtoGroupInfo(rsGroupInfo); + Put p = new Put(Bytes.toBytes(rsGroupInfo.getName())); + p.addColumn(META_FAMILY_BYTES, META_QUALIFIER_BYTES, proto.toByteArray()); + table.put(p); + } + TEST_UTIL.getMiniHBaseCluster().stopMaster(0).join(); + RESUME = new CountDownLatch(1); 
+ TEST_UTIL.getMiniHBaseCluster().startMaster(); + + // wait until we can get the rs group info for a table + TEST_UTIL.waitFor(30000, () -> { + try { + rsGroupAdmin.getRSGroupInfoOfTable(TableName.valueOf(TABLE_NAME_PREFIX + 0)); + return true; + } catch (IOException e) { + return false; + } + }); + // confirm that before migrating, we could still get the correct rs group for a table. + for (int i = 0; i < NUM_TABLES; i++) { + RSGroupInfo info = + rsGroupAdmin.getRSGroupInfoOfTable(TableName.valueOf(TABLE_NAME_PREFIX + i)); + assertEquals(rsGroupInfo.getName(), info.getName()); + assertEquals(NUM_TABLES, info.getTables().size()); + } + RESUME.countDown(); + TEST_UTIL.waitFor(60000, () -> { + for (int i = 0; i < NUM_TABLES; i++) { + TableDescriptor td; + try { + td = TEST_UTIL.getAdmin().getDescriptor(TableName.valueOf(TABLE_NAME_PREFIX + i)); + } catch (IOException e) { + return false; + } + if (!rsGroupInfo.getName().equals(td.getRegionServerGroup().orElse(null))) { + return false; + } + } + return true; + }); + // make sure that we persist the result to hbase, where we delete all the tables in the rs + // group. + TEST_UTIL.waitFor(30000, () -> { + try (Table table = TEST_UTIL.getConnection().getTable(RSGROUP_TABLE_NAME)) { + Result result = table.get(new Get(Bytes.toBytes(rsGroupInfo.getName()))); + RSGroupProtos.RSGroupInfo proto = RSGroupProtos.RSGroupInfo + .parseFrom(result.getValue(META_FAMILY_BYTES, META_QUALIFIER_BYTES)); + RSGroupInfo gi = ProtobufUtil.toGroupInfo(proto); + return gi.getTables().isEmpty(); + } + }); + // make sure that the migrate thread has quit. 
+ TEST_UTIL.waitFor(30000, () -> Thread.getAllStackTraces().keySet().stream() + .noneMatch(t -> t.getName().equals(RSGroupInfoManagerImpl.MIGRATE_THREAD_NAME))); + // make sure we could still get the correct rs group info after migration + for (int i = 0; i < NUM_TABLES; i++) { + RSGroupInfo info = + rsGroupAdmin.getRSGroupInfoOfTable(TableName.valueOf(TABLE_NAME_PREFIX + i)); + assertEquals(rsGroupInfo.getName(), info.getName()); + assertEquals(NUM_TABLES, info.getTables().size()); + } + } +} diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupMajorCompactionTTL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupMajorCompactionTTL.java similarity index 100% rename from hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupMajorCompactionTTL.java rename to hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupMajorCompactionTTL.java diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin1.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin1.java similarity index 90% rename from hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin1.java rename to hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin1.java index 27511e30794a..fbd314d9fb60 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin1.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin1.java @@ -29,7 +29,6 @@ import java.util.Map; import java.util.Set; import java.util.SortedSet; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.MiniHBaseCluster; import org.apache.hadoop.hbase.NamespaceDescriptor; @@ -38,6 +37,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.Waiter; +import org.apache.hadoop.hbase.client.Admin; import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.TableDescriptor; @@ -58,11 +58,14 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.collect.Sets; +@RunWith(Parameterized.class) @Category({ MediumTests.class }) public class TestRSGroupsAdmin1 extends TestRSGroupsBase { @@ -125,7 +128,7 @@ public void testBogusArgs() throws Exception { } try { - rsGroupAdmin.moveTables(Sets.newHashSet(TableName.valueOf("bogustable")), "bogus"); + rsGroupAdmin.setRSGroupForTables(Sets.newHashSet(TableName.valueOf("bogustable")), "bogus"); fail("Expected move with bogus group to fail"); } catch (ConstraintException | TableNotFoundException ex) { // expected @@ -153,12 +156,14 @@ public void testNamespaceConstraint() throws Exception { String nsName = tablePrefix + "_foo"; String groupName = tablePrefix + "_foo"; LOG.info("testNamespaceConstraint"); - rsGroupAdmin.addRSGroup(groupName); + addGroup(groupName, 1); assertTrue(observer.preAddRSGroupCalled); assertTrue(observer.postAddRSGroupCalled); admin.createNamespace(NamespaceDescriptor.create(nsName) .addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, groupName).build()); + RSGroupInfo rsGroupInfo = rsGroupAdmin.getRSGroupInfo(groupName); + rsGroupAdmin.moveServers(rsGroupInfo.getServers(), RSGroupInfo.DEFAULT_GROUP); // test removing a referenced group try { rsGroupAdmin.removeRSGroup(groupName); @@ -229,7 +234,7 @@ public void testFailRemoveGroup() throws IOException, InterruptedException { int initNumGroups = rsGroupAdmin.listRSGroups().size(); addGroup("bar", 3); TEST_UTIL.createTable(tableName, Bytes.toBytes("f")); - rsGroupAdmin.moveTables(Sets.newHashSet(tableName), "bar"); + 
rsGroupAdmin.setRSGroupForTables(Sets.newHashSet(tableName), "bar"); RSGroupInfo barGroup = rsGroupAdmin.getRSGroupInfo("bar"); // group is not empty therefore it should fail try { @@ -244,7 +249,7 @@ public void testFailRemoveGroup() throws IOException, InterruptedException { } catch (IOException e) { } - rsGroupAdmin.moveTables(barGroup.getTables(), RSGroupInfo.DEFAULT_GROUP); + rsGroupAdmin.setRSGroupForTables(barGroup.getTables(), RSGroupInfo.DEFAULT_GROUP); try { rsGroupAdmin.removeRSGroup(barGroup.getName()); fail("Expected move servers to fail"); @@ -259,10 +264,12 @@ public void testFailRemoveGroup() throws IOException, InterruptedException { @Test public void testMultiTableMove() throws Exception { - final TableName tableNameA = TableName.valueOf(tablePrefix + name.getMethodName() + "A"); - final TableName tableNameB = TableName.valueOf(tablePrefix + name.getMethodName() + "B"); + final TableName tableNameA = TableName.valueOf(tablePrefix + + getNameWithoutIndex(name.getMethodName()) + "A"); + final TableName tableNameB = TableName.valueOf(tablePrefix + + getNameWithoutIndex(name.getMethodName()) + "B"); final byte[] familyNameBytes = Bytes.toBytes("f"); - String newGroupName = getGroupName(name.getMethodName()); + String newGroupName = getGroupName(getNameWithoutIndex(name.getMethodName())); final RSGroupInfo newGroup = addGroup(newGroupName, 1); TEST_UTIL.createTable(tableNameA, familyNameBytes); @@ -290,7 +297,7 @@ public boolean evaluate() throws Exception { assertTrue(tableGrpB.getName().equals(RSGroupInfo.DEFAULT_GROUP)); // change table's group LOG.info("Moving table [" + tableNameA + "," + tableNameB + "] to " + newGroup.getName()); - rsGroupAdmin.moveTables(Sets.newHashSet(tableNameA, tableNameB), newGroup.getName()); + rsGroupAdmin.setRSGroupForTables(Sets.newHashSet(tableNameA, tableNameB), newGroup.getName()); // verify group change Assert.assertEquals(newGroup.getName(), @@ -314,7 +321,7 @@ public boolean evaluate() throws Exception { @Test 
public void testTableMoveTruncateAndDrop() throws Exception { final byte[] familyNameBytes = Bytes.toBytes("f"); - String newGroupName = getGroupName(name.getMethodName()); + String newGroupName = getGroupName(getNameWithoutIndex(name.getMethodName())); final RSGroupInfo newGroup = addGroup(newGroupName, 2); TEST_UTIL.createMultiRegionTable(tableName, familyNameBytes, 5); @@ -331,11 +338,12 @@ public boolean evaluate() throws Exception { }); RSGroupInfo tableGrp = rsGroupAdmin.getRSGroupInfoOfTable(tableName); + LOG.info("got table group info is {}", tableGrp); assertTrue(tableGrp.getName().equals(RSGroupInfo.DEFAULT_GROUP)); // change table's group LOG.info("Moving table " + tableName + " to " + newGroup.getName()); - rsGroupAdmin.moveTables(Sets.newHashSet(tableName), newGroup.getName()); + rsGroupAdmin.setRSGroupForTables(Sets.newHashSet(tableName), newGroup.getName()); // verify group change Assert.assertEquals(newGroup.getName(), @@ -368,14 +376,14 @@ public boolean evaluate() throws Exception { TEST_UTIL.deleteTable(tableName); Assert.assertEquals(0, rsGroupAdmin.getRSGroupInfo(newGroup.getName()).getTables().size()); - assertTrue(observer.preMoveTablesCalled); - assertTrue(observer.postMoveTablesCalled); + assertTrue(observer.preSetRSGroupForTablesCalled); + assertTrue(observer.postSetRSGroupForTablesCalled); } @Test public void testDisabledTableMove() throws Exception { final byte[] familyNameBytes = Bytes.toBytes("f"); - String newGroupName = getGroupName(name.getMethodName()); + String newGroupName = getGroupName(getNameWithoutIndex(name.getMethodName())); final RSGroupInfo newGroup = addGroup(newGroupName, 2); TEST_UTIL.createMultiRegionTable(tableName, familyNameBytes, 5); @@ -398,7 +406,7 @@ public boolean evaluate() throws Exception { // change table's group LOG.info("Moving table " + tableName + " to " + newGroup.getName()); - rsGroupAdmin.moveTables(Sets.newHashSet(tableName), newGroup.getName()); + 
rsGroupAdmin.setRSGroupForTables(Sets.newHashSet(tableName), newGroup.getName()); // verify group change Assert.assertEquals(newGroup.getName(), @@ -407,8 +415,8 @@ public boolean evaluate() throws Exception { @Test public void testNonExistentTableMove() throws Exception { - TableName tableName = TableName.valueOf(tablePrefix + name.getMethodName()); - + TableName tableName = TableName.valueOf(tablePrefix + + getNameWithoutIndex(name.getMethodName())); RSGroupInfo tableGrp = rsGroupAdmin.getRSGroupInfoOfTable(tableName); assertNull(tableGrp); @@ -418,15 +426,16 @@ public void testNonExistentTableMove() throws Exception { LOG.info("Moving table " + tableName + " to " + RSGroupInfo.DEFAULT_GROUP); try { - rsGroupAdmin.moveTables(Sets.newHashSet(tableName), RSGroupInfo.DEFAULT_GROUP); + rsGroupAdmin.setRSGroupForTables(Sets.newHashSet(tableName), RSGroupInfo.DEFAULT_GROUP); fail("Table " + tableName + " shouldn't have been successfully moved."); } catch (IOException ex) { assertTrue(ex instanceof TableNotFoundException); } try { - rsGroupAdmin.moveServersAndTables(Sets.newHashSet(Address.fromParts("bogus", 123)), - Sets.newHashSet(tableName), RSGroupInfo.DEFAULT_GROUP); + rsGroupAdmin.setRSGroupForTables(Sets.newHashSet(tableName), RSGroupInfo.DEFAULT_GROUP); + rsGroupAdmin.moveServers(Sets.newHashSet(Address.fromParts("bogus", 123)), + RSGroupInfo.DEFAULT_GROUP); fail("Table " + tableName + " shouldn't have been successfully moved."); } catch (IOException ex) { assertTrue(ex instanceof TableNotFoundException); @@ -517,14 +526,4 @@ public boolean evaluate() throws Exception { // Cleanup TEST_UTIL.deleteTable(tn1); } - - private void toggleQuotaCheckAndRestartMiniCluster(boolean enable) throws Exception { - TEST_UTIL.shutdownMiniCluster(); - TEST_UTIL.getConfiguration().setBoolean(QuotaUtil.QUOTA_CONF_KEY, enable); - TEST_UTIL.startMiniCluster(NUM_SLAVES_BASE - 1); - TEST_UTIL.getConfiguration().setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, - NUM_SLAVES_BASE - 
1); - TEST_UTIL.getConfiguration().setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true); - initialize(); - } } diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java similarity index 84% rename from hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java rename to hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java index 6553a85c9384..262484701392 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java @@ -17,11 +17,9 @@ */ package org.apache.hadoop.hbase.rsgroup; -import static org.apache.hadoop.hbase.rsgroup.RSGroupAdminServer.DEFAULT_MAX_RETRY_VALUE; import static org.apache.hadoop.hbase.util.Threads.sleep; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -56,12 +54,15 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.collect.Sets; +@RunWith(Parameterized.class) @Category({ LargeTests.class }) public class TestRSGroupsAdmin2 extends TestRSGroupsBase { @@ -335,8 +336,9 @@ public boolean evaluate() throws Exception { // test fail bogus server move try { - rsGroupAdmin.moveServersAndTables(Sets.newHashSet(Address.fromString("foo:9999")), - Sets.newHashSet(tableName), newGroup.getName()); + rsGroupAdmin.moveServers(Sets.newHashSet(Address.fromString("foo:9999")), + 
newGroup.getName()); + rsGroupAdmin.setRSGroupForTables(Sets.newHashSet(tableName), newGroup.getName()); fail("Bogus servers shouldn't have been successfully moved."); } catch (IOException ex) { String exp = "Source RSGroup for server foo:9999 does not exist."; @@ -345,8 +347,9 @@ public boolean evaluate() throws Exception { } // test move when src = dst - rsGroupAdmin.moveServersAndTables(Sets.newHashSet(targetServer.getAddress()), - Sets.newHashSet(tableName), RSGroupInfo.DEFAULT_GROUP); + rsGroupAdmin.moveServers(Sets.newHashSet(targetServer.getAddress()), + RSGroupInfo.DEFAULT_GROUP); + rsGroupAdmin.setRSGroupForTables(Sets.newHashSet(tableName), RSGroupInfo.DEFAULT_GROUP); // verify default group info Assert.assertEquals(oldDefaultGroupServerSize, @@ -382,8 +385,8 @@ public boolean evaluate() throws Exception { // move targetServer and table to newGroup LOG.info("moving server and table to newGroup"); - rsGroupAdmin.moveServersAndTables(Sets.newHashSet(targetServer.getAddress()), - Sets.newHashSet(tableName), newGroup.getName()); + rsGroupAdmin.moveServers(Sets.newHashSet(targetServer.getAddress()), newGroup.getName()); + rsGroupAdmin.setRSGroupForTables(Sets.newHashSet(tableName), newGroup.getName()); // verify group change Assert.assertEquals(newGroup.getName(), @@ -408,10 +411,14 @@ public boolean evaluate() throws Exception { assertTrue(newGroupTables.contains(tableName)); // verify that all region still assgin on targetServer - Assert.assertEquals(5, getTableServerRegionMap().get(tableName).get(targetServer).size()); - - assertTrue(observer.preMoveServersAndTables); - assertTrue(observer.postMoveServersAndTables); + // TODO: uncomment after we reimplement moveServersAndTables, now the implementation is + // moveServers first and then moveTables, so the region will be moved to other region servers. 
+ // Assert.assertEquals(5, getTableServerRegionMap().get(tableName).get(targetServer).size()); + + assertTrue(observer.preSetRSGroupForTablesCalled); + assertTrue(observer.preMoveServersCalled); + assertTrue(observer.postSetRSGroupForTablesCalled); + assertTrue(observer.postMoveServersCalled); } @Test @@ -426,10 +433,10 @@ public void testMoveServersFromDefaultGroup() throws Exception { // test remove all servers from default try { rsGroupAdmin.moveServers(defaultGroup.getServers(), fooGroup.getName()); - fail(RSGroupAdminServer.KEEP_ONE_SERVER_IN_DEFAULT_ERROR_MESSAGE); + fail(RSGroupInfoManagerImpl.KEEP_ONE_SERVER_IN_DEFAULT_ERROR_MESSAGE); } catch (ConstraintException ex) { assertTrue( - ex.getMessage().contains(RSGroupAdminServer.KEEP_ONE_SERVER_IN_DEFAULT_ERROR_MESSAGE)); + ex.getMessage().contains(RSGroupInfoManagerImpl.KEEP_ONE_SERVER_IN_DEFAULT_ERROR_MESSAGE)); } // test success case, remove one server from default ,keep at least one server @@ -503,61 +510,6 @@ public boolean evaluate() { }); } - @Test - public void testFailedMoveBeforeRetryExhaustedWhenMoveTable() throws Exception { - final RSGroupInfo newGroup = addGroup(getGroupName(name.getMethodName()), 1); - Pair gotPair = createTableWithRegionSplitting(newGroup, - 5); - - // move table to group - Thread t2 = new Thread(() -> { - LOG.info("thread2 start running, to move regions"); - try { - rsGroupAdmin.moveTables(Sets.newHashSet(tableName), newGroup.getName()); - } catch (IOException e) { - LOG.error("move server error", e); - } - }); - t2.start(); - - // start thread to recover region state - final ServerName ss = gotPair.getFirst(); - final RegionStateNode rsn = gotPair.getSecond(); - AtomicBoolean changed = new AtomicBoolean(false); - - Thread t1 = recoverRegionStateThread(ss, server -> { - List regions = master.getAssignmentManager().getRegionsOnServer(ss); - List tableRegions = new ArrayList<>(); - for (RegionInfo regionInfo : regions) { - if (regionInfo.getTable().equals(tableName)) { - 
tableRegions.add(regionInfo); - } - } - return tableRegions; - }, rsn, changed); - t1.start(); - - t1.join(); - t2.join(); - - TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { - @Override - public boolean evaluate() { - if (changed.get()) { - boolean serverHasTableRegions = false; - for (RegionInfo regionInfo : master.getAssignmentManager().getRegionsOnServer(ss)) { - if (regionInfo.getTable().equals(tableName)) { - serverHasTableRegions = true; - break; - } - } - return !serverHasTableRegions && !rsn.getRegionLocation().equals(ss); - } - return false; - } - }); - } - private Thread recoverRegionStateThread(T owner, Function> getRegions, RegionStateNode rsn, AtomicBoolean changed){ return new Thread(() -> { @@ -566,7 +518,8 @@ private Thread recoverRegionStateThread(T owner, Function regions = getRegions.apply(owner); LOG.debug("server table region size is:{}", regions.size()); assert regions.size() >= 1; @@ -650,50 +603,6 @@ private Pair randomlySetRegionState(RSGroupInfo gro return new Pair<>(srcServer, rsn); } - @Test - public void testFailedMoveTablesAndRepair() throws Exception{ - // This UT calls moveTables() twice to test the idempotency of it. - // The first time, movement fails because a region is made in SPLITTING state - // which will not be moved. - // The second time, the region state is OPEN and check if all - // regions on target group servers after the call. 
- final RSGroupInfo newGroup = addGroup(getGroupName(name.getMethodName()), 1); - Iterator iterator = newGroup.getServers().iterator(); - Address newGroupServer1 = (Address) iterator.next(); - - // create table - // randomly set a region state to SPLITTING to make move abort - Pair gotPair = createTableWithRegionSplitting(newGroup, - new Random().nextInt(8) + 4); - RegionStateNode rsn = gotPair.getSecond(); - - // move table to newGroup and check regions - try { - rsGroupAdmin.moveTables(Sets.newHashSet(tableName), newGroup.getName()); - fail("should get IOException when retry exhausted but there still exists failed moved " - + "regions"); - }catch (Exception e){ - assertTrue(e.getMessage().contains( - gotPair.getSecond().getRegionInfo().getRegionNameAsString())); - } - for(RegionInfo regionInfo : master.getAssignmentManager().getAssignedRegions()){ - if (regionInfo.getTable().equals(tableName) && regionInfo.equals(rsn.getRegionInfo())) { - assertNotEquals(master.getAssignmentManager().getRegionStates() - .getRegionServerOfRegion(regionInfo).getAddress(), newGroupServer1); - } - } - - // retry move table to newGroup and check if all regions are corrected - rsn.setState(RegionState.State.OPEN); - rsGroupAdmin.moveTables(Sets.newHashSet(tableName), newGroup.getName()); - for(RegionInfo regionInfo : master.getAssignmentManager().getAssignedRegions()){ - if (regionInfo.getTable().equals(tableName)) { - assertEquals(master.getAssignmentManager().getRegionStates() - .getRegionServerOfRegion(regionInfo).getAddress(), newGroupServer1); - } - } - } - @Test public void testFailedMoveServersAndRepair() throws Exception{ // This UT calls moveServers() twice to test the idempotency of it. 
@@ -757,8 +666,8 @@ public void testFailedMoveServersTablesAndRepair() throws Exception{ // move server and table to newGroup and check regions try { - rsGroupAdmin.moveServersAndTables(Sets.newHashSet(srcServer.getAddress()), - Sets.newHashSet(table2), newGroup.getName()); + rsGroupAdmin.moveServers(Sets.newHashSet(srcServer.getAddress()), newGroup.getName()); + rsGroupAdmin.setRSGroupForTables(Sets.newHashSet(table2), newGroup.getName()); fail("should get IOException when retry exhausted but there still exists failed moved " + "regions"); }catch (Exception e){ @@ -775,8 +684,8 @@ public void testFailedMoveServersTablesAndRepair() throws Exception{ // retry moveServersAndTables to newGroup and check if all regions on srcServer belongs to // table2 rsn.setState(RegionState.State.OPEN); - rsGroupAdmin.moveServersAndTables(Sets.newHashSet(srcServer.getAddress()), - Sets.newHashSet(table2), newGroup.getName()); + rsGroupAdmin.moveServers(Sets.newHashSet(srcServer.getAddress()), newGroup.getName()); + rsGroupAdmin.setRSGroupForTables(Sets.newHashSet(table2), newGroup.getName()); for(RegionInfo regionsInfo : master.getAssignmentManager().getRegionsOnServer(srcServer)){ assertEquals(regionsInfo.getTable(), table2); } diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBalance.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBalance.java similarity index 87% rename from hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBalance.java rename to hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBalance.java index 67f5c7ee7577..be9318600d9b 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBalance.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBalance.java @@ -42,11 +42,12 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; +import 
org.junit.runner.RunWith; +import org.junit.runners.Parameterized; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.collect.Sets; - +@RunWith(Parameterized.class) @Category({ MediumTests.class }) public class TestRSGroupsBalance extends TestRSGroupsBase { @@ -82,7 +83,8 @@ public void testGroupBalance() throws Exception { String newGroupName = getGroupName(name.getMethodName()); addGroup(newGroupName, 3); - final TableName tableName = TableName.valueOf(tablePrefix + "_ns", name.getMethodName()); + final TableName tableName = TableName.valueOf(tablePrefix + "_ns", + getNameWithoutIndex(name.getMethodName())); admin.createNamespace(NamespaceDescriptor.create(tableName.getNamespaceAsString()) .addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, newGroupName).build()); final TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName) @@ -153,19 +155,20 @@ public boolean evaluate() throws Exception { @Test public void testMisplacedRegions() throws Exception { - final TableName tableName = TableName.valueOf(tablePrefix + "_testMisplacedRegions"); - LOG.info("testMisplacedRegions"); + String namespace = tablePrefix + "_" + getNameWithoutIndex(name.getMethodName()); + TEST_UTIL.getAdmin().createNamespace(NamespaceDescriptor.create(namespace).build()); + final TableName tableName = TableName.valueOf(namespace, tablePrefix + "_" + + getNameWithoutIndex(name.getMethodName())); - final RSGroupInfo RSGroupInfo = addGroup("testMisplacedRegions", 1); + final RSGroupInfo rsGroupInfo = addGroup(getGroupName(name.getMethodName()), 1); TEST_UTIL.createMultiRegionTable(tableName, new byte[] { 'f' }, 15); TEST_UTIL.waitUntilAllRegionsAssigned(tableName); - - rsGroupAdminEndpoint.getGroupInfoManager().moveTables(Sets.newHashSet(tableName), - RSGroupInfo.getName()); + TEST_UTIL.getAdmin().modifyNamespace(NamespaceDescriptor.create(namespace) + .addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, 
rsGroupInfo.getName()).build()); admin.balancerSwitch(true, true); - assertTrue(rsGroupAdmin.balanceRSGroup(RSGroupInfo.getName())); + assertTrue(rsGroupAdmin.balanceRSGroup(rsGroupInfo.getName())); admin.balancerSwitch(false, true); assertTrue(observer.preBalanceRSGroupCalled); assertTrue(observer.postBalanceRSGroupCalled); @@ -174,7 +177,7 @@ public void testMisplacedRegions() throws Exception { @Override public boolean evaluate() throws Exception { ServerName serverName = - ServerName.valueOf(RSGroupInfo.getServers().iterator().next().toString(), 1); + ServerName.valueOf(rsGroupInfo.getServers().iterator().next().toString(), 1); return admin.getConnection().getAdmin().getRegions(serverName).size() == 15; } }); diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java similarity index 84% rename from hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java rename to hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java index c5520cf11f1c..bbfba100fbd0 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java @@ -20,6 +20,7 @@ import static org.junit.Assert.assertTrue; import java.io.IOException; +import java.util.Arrays; import java.util.EnumSet; import java.util.HashSet; import java.util.LinkedList; @@ -29,6 +30,8 @@ import java.util.Random; import java.util.Set; import java.util.TreeMap; +import java.util.concurrent.ForkJoinPool; +import java.util.function.Supplier; import java.util.regex.Pattern; import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.ClusterMetrics.Option; @@ -43,8 +46,10 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.client.Admin; +import 
org.apache.hadoop.hbase.client.AsyncAdmin; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TestAsyncAdminBase; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor; import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; @@ -55,9 +60,12 @@ import org.apache.hadoop.hbase.master.ServerManager; import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; import org.apache.hadoop.hbase.net.Address; +import org.apache.hadoop.hbase.quotas.QuotaUtil; import org.apache.hadoop.hbase.util.Bytes; +import org.junit.Before; import org.junit.Rule; import org.junit.rules.TestName; +import org.junit.runners.Parameterized; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -73,10 +81,9 @@ public abstract class TestRSGroupsBase { protected final static Random rand = new Random(); //shared, cluster type specific - protected static HBaseTestingUtility TEST_UTIL; + protected static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); protected static Admin admin; protected static HBaseCluster cluster; - protected static RSGroupAdmin rsGroupAdmin; protected static HMaster master; protected boolean INIT = false; protected static RSGroupAdminEndpoint rsGroupAdminEndpoint; @@ -91,8 +98,49 @@ public abstract class TestRSGroupsBase { public TestName name = new TestName(); protected TableName tableName; + protected Admin rsGroupAdmin; + + @Parameterized.Parameter + public Supplier getAdmin; + + private static RSGroupAdminClient getRSGroupAdmin(){ + try { + return new VerifyingRSGroupAdminClient( + new RSGroupAdminClient(TEST_UTIL.getConnection()), TEST_UTIL.getConfiguration()); + } catch (IOException e) { + LOG.error("Get group admin failed", e); + return null; + } + } + + private static Admin getAdmin(){ + try { + return TEST_UTIL.getAdmin(); + } catch (IOException e) { + LOG.error("Get hbase 
admin failed", e); + return null; + } + } + + public static Object resetAdminConnection(Object admin) { + if(admin instanceof RSGroupAdminClient) { + return getRSGroupAdmin(); + }else { + return getAdmin(); + } + } + + public static String getNameWithoutIndex(String name) { + return name.split("\\[")[0]; + } + + @Parameterized.Parameters + public static List params() { + return Arrays.asList(new Supplier[] { TestRSGroupsBase::getRSGroupAdmin }, + new Supplier[] { TestRSGroupsBase::getAdmin }); + } + public static void setUpTestBeforeClass() throws Exception { - TEST_UTIL = new HBaseTestingUtility(); TEST_UTIL.getConfiguration().setFloat( "hbase.master.balancer.stochastic.tableSkewCost", 6000); TEST_UTIL.getConfiguration().set( @@ -110,6 +158,10 @@ public static void setUpTestBeforeClass() throws Exception { initialize(); } + public void setAdmin(){ + rsGroupAdmin = (Admin) getAdmin.get(); + } + protected static void initialize() throws Exception { admin = TEST_UTIL.getAdmin(); cluster = TEST_UTIL.getHBaseCluster(); @@ -124,8 +176,6 @@ public boolean evaluate() throws Exception { } }); admin.balancerSwitch(false, true); - rsGroupAdmin = new VerifyingRSGroupAdminClient( - new RSGroupAdminClient(TEST_UTIL.getConnection()), TEST_UTIL.getConfiguration()); MasterCoprocessorHost host = master.getMasterCoprocessorHost(); observer = (CPMasterObserver) host.findCoprocessor(CPMasterObserver.class.getName()); rsGroupAdminEndpoint = (RSGroupAdminEndpoint) @@ -137,8 +187,9 @@ public static void tearDownAfterClass() throws Exception { } public void setUpBeforeMethod() throws Exception { + setAdmin(); LOG.info(name.getMethodName()); - tableName = TableName.valueOf(tablePrefix + "_" + name.getMethodName()); + tableName = TableName.valueOf(tablePrefix + "_" + name.getMethodName().split("\\[")[0]); if (!INIT) { INIT = true; tearDownAfterMethod(); @@ -190,8 +241,8 @@ public RSGroupInfo addGroup(String groupName, int serverCount) RSGroupInfo defaultInfo = 
rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP); rsGroupAdmin.addRSGroup(groupName); Set
set = new HashSet<>(); - for(Address server: defaultInfo.getServers()) { - if(set.size() == serverCount) { + for (Address server : defaultInfo.getServers()) { + if (set.size() == serverCount) { break; } set.add(server); @@ -203,7 +254,7 @@ public RSGroupInfo addGroup(String groupName, int serverCount) public void removeGroup(String groupName) throws IOException { RSGroupInfo groupInfo = rsGroupAdmin.getRSGroupInfo(groupName); - rsGroupAdmin.moveTables(groupInfo.getTables(), RSGroupInfo.DEFAULT_GROUP); + rsGroupAdmin.setRSGroupForTables(groupInfo.getTables(), RSGroupInfo.DEFAULT_GROUP); rsGroupAdmin.moveServers(groupInfo.getServers(), RSGroupInfo.DEFAULT_GROUP); rsGroupAdmin.removeRSGroup(groupName); } @@ -224,10 +275,10 @@ public void deleteNamespaceIfNecessary() throws IOException { } public void deleteGroups() throws IOException { - RSGroupAdmin groupAdmin = new RSGroupAdminClient(TEST_UTIL.getConnection()); + RSGroupAdminClient groupAdmin = new RSGroupAdminClient(TEST_UTIL.getConnection()); for(RSGroupInfo group: groupAdmin.listRSGroups()) { if(!group.getName().equals(RSGroupInfo.DEFAULT_GROUP)) { - groupAdmin.moveTables(group.getTables(), RSGroupInfo.DEFAULT_GROUP); + groupAdmin.setRSGroupForTables(group.getTables(), RSGroupInfo.DEFAULT_GROUP); groupAdmin.moveServers(group.getServers(), RSGroupInfo.DEFAULT_GROUP); groupAdmin.removeRSGroup(group.getName()); } @@ -291,7 +342,8 @@ public int getNumServers() throws IOException { } public String getGroupName(String baseName) { - return groupPrefix + "_" + baseName + "_" + rand.nextInt(Integer.MAX_VALUE); + return groupPrefix + "_" + getNameWithoutIndex(baseName) + "_" + + rand.nextInt(Integer.MAX_VALUE); } /** @@ -304,6 +356,17 @@ protected ServerName getServerName(Address addr) { .findFirst().get(); } + protected void toggleQuotaCheckAndRestartMiniCluster(boolean enable) throws Exception { + TEST_UTIL.shutdownMiniCluster(); + TEST_UTIL.getConfiguration().setBoolean(QuotaUtil.QUOTA_CONF_KEY, enable); + 
TEST_UTIL.startMiniCluster(NUM_SLAVES_BASE - 1); + TEST_UTIL.getConfiguration().setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, + NUM_SLAVES_BASE - 1); + TEST_UTIL.getConfiguration().setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true); + initialize(); + rsGroupAdmin = (Admin) resetAdminConnection(rsGroupAdmin); + } + public static class CPMasterObserver implements MasterCoprocessor, MasterObserver { boolean preBalanceRSGroupCalled = false; boolean postBalanceRSGroupCalled = false; @@ -327,6 +390,8 @@ public static class CPMasterObserver implements MasterCoprocessor, MasterObserve boolean postListRSGroupsCalled = false; boolean preGetRSGroupInfoOfServerCalled = false; boolean postGetRSGroupInfoOfServerCalled = false; + boolean preSetRSGroupForTablesCalled = false; + boolean postSetRSGroupForTablesCalled = false; public void resetFlags() { preBalanceRSGroupCalled = false; @@ -351,6 +416,8 @@ public void resetFlags() { postListRSGroupsCalled = false; preGetRSGroupInfoOfServerCalled = false; postGetRSGroupInfoOfServerCalled = false; + preSetRSGroupForTablesCalled = false; + postSetRSGroupForTablesCalled = false; } @Override @@ -491,6 +558,18 @@ public void postGetRSGroupInfoOfServer(final ObserverContext ctx, + final Set tables, final String groupName) throws IOException { + preSetRSGroupForTablesCalled = true; + } + + @Override + public void postSetRSGroupForTables(final ObserverContext ctx, + final Set tables, final String groupName) throws IOException { + postSetRSGroupForTablesCalled = true; + } } } diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBasics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBasics.java similarity index 79% rename from hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBasics.java rename to hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBasics.java index e3cb54e1cdfc..9024e263ac98 100644 --- 
a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBasics.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBasics.java @@ -20,11 +20,8 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; import java.io.IOException; -import java.util.ArrayList; -import java.util.Iterator; import java.util.List; import java.util.Set; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -32,6 +29,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter; +import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; @@ -48,11 +46,14 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; +@RunWith(Parameterized.class) @Category({ MediumTests.class }) public class TestRSGroupsBasics extends TestRSGroupsBase { @@ -88,6 +89,8 @@ public void testBasicStartUp() throws IOException { assertEquals(NUM_SLAVES_BASE, defaultInfo.getServers().size()); // Assignment of meta and rsgroup regions. 
int count = master.getAssignmentManager().getRegionStates().getRegionAssignments().size(); + LOG.info("regions assignments are" + + master.getAssignmentManager().getRegionStates().getRegionAssignments().toString()); // 2 (meta and rsgroup) assertEquals(2, count); } @@ -136,45 +139,6 @@ public boolean evaluate() throws Exception { Assert.assertEquals(1, admin.getRegions(targetServer).size()); } - @Test - public void testCreateWhenRsgroupNoOnlineServers() throws Exception { - LOG.info("testCreateWhenRsgroupNoOnlineServers"); - - // set rsgroup has no online servers and test create table - final RSGroupInfo appInfo = addGroup("appInfo", 1); - Iterator
iterator = appInfo.getServers().iterator(); - List serversToDecommission = new ArrayList<>(); - ServerName targetServer = getServerName(iterator.next()); - assertTrue(master.getServerManager().getOnlineServers().containsKey(targetServer)); - serversToDecommission.add(targetServer); - admin.decommissionRegionServers(serversToDecommission, true); - assertEquals(1, admin.listDecommissionedRegionServers().size()); - - final TableName tableName = TableName.valueOf(tablePrefix + "_ns", name.getMethodName()); - admin.createNamespace(NamespaceDescriptor.create(tableName.getNamespaceAsString()) - .addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, appInfo.getName()).build()); - final TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f")).build(); - try { - admin.createTable(desc); - fail("Shouldn't create table successfully!"); - } catch (Exception e) { - LOG.debug("create table error", e); - } - - // recommission and test create table - admin.recommissionRegionServer(targetServer, null); - assertEquals(0, admin.listDecommissionedRegionServers().size()); - admin.createTable(desc); - // wait for created table to be assigned - TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { - @Override - public boolean evaluate() throws Exception { - return getTableRegionMap().get(desc.getTableName()) != null; - } - }); - } - @Test public void testDefaultNamespaceCreateAndAssign() throws Exception { LOG.info("testDefaultNamespaceCreateAndAssign"); @@ -207,6 +171,7 @@ public void testCloneSnapshot() throws Exception { // clone admin.cloneSnapshot(snapshotName, clonedTableName); + admin.deleteSnapshot(snapshotName); } @Test @@ -293,27 +258,13 @@ public boolean evaluate() throws Exception { @Test public void testRSGroupsWithHBaseQuota() throws Exception { - TEST_UTIL.getConfiguration().setBoolean(QuotaUtil.QUOTA_CONF_KEY, true); - restartHBaseCluster(); - try { - TEST_UTIL.waitFor(90000, new Waiter.Predicate() { 
- @Override - public boolean evaluate() throws Exception { - return admin.isTableAvailable(QuotaTableUtil.QUOTA_TABLE_NAME); - } - }); - } finally { - TEST_UTIL.getConfiguration().setBoolean(QuotaUtil.QUOTA_CONF_KEY, false); - restartHBaseCluster(); - } - } - - private void restartHBaseCluster() throws Exception { - LOG.info("\n\nShutting down cluster"); - TEST_UTIL.shutdownMiniHBaseCluster(); - LOG.info("\n\nSleeping a bit"); - Thread.sleep(2000); - TEST_UTIL.restartHBaseCluster(NUM_SLAVES_BASE - 1); - initialize(); + toggleQuotaCheckAndRestartMiniCluster(true); + TEST_UTIL.waitFor(90000, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return admin.isTableAvailable(QuotaTableUtil.QUOTA_TABLE_NAME); + } + }); + toggleQuotaCheckAndRestartMiniCluster(false); } } diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsKillRS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsKillRS.java similarity index 97% rename from hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsKillRS.java rename to hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsKillRS.java index 503a1a69f61d..cb7831d340be 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsKillRS.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsKillRS.java @@ -47,11 +47,14 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.collect.Sets; +@RunWith(Parameterized.class) @Category({ MediumTests.class }) public class TestRSGroupsKillRS extends TestRSGroupsBase { @@ -84,7 +87,8 @@ public void afterMethod() throws Exception { @Test public void testKillRS() throws Exception { RSGroupInfo appInfo = 
addGroup("appInfo", 1); - final TableName tableName = TableName.valueOf(tablePrefix + "_ns", name.getMethodName()); + final TableName tableName = TableName.valueOf(tablePrefix + "_ns", + getNameWithoutIndex(name.getMethodName())); admin.createNamespace(NamespaceDescriptor.create(tableName.getNamespaceAsString()) .addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, appInfo.getName()).build()); final TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName) @@ -157,7 +161,7 @@ public void testKillAllRSInGroup() throws Exception { TEST_UTIL.loadTable(t, Bytes.toBytes("f")); Set toAddTables = new HashSet<>(); toAddTables.add(tableName); - rsGroupAdmin.moveTables(toAddTables, groupName); + rsGroupAdmin.setRSGroupForTables(toAddTables, groupName); assertTrue(rsGroupAdmin.getRSGroupInfo(groupName).getTables().contains(tableName)); TEST_UTIL.waitTableAvailable(tableName, 30000); diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java similarity index 92% rename from hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java rename to hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java index 39cf164ecf4f..6b1c210f7bee 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hbase.rsgroup; -import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -57,7 +56,7 @@ // online. In new master, RSGroupInfoManagerImpl gets the data from zk and waits for the expected // assignment with a timeout. 
@Category(MediumTests.class) -public class TestRSGroupsOfflineMode { +public class TestRSGroupsOfflineMode extends TestRSGroupsBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -107,13 +106,13 @@ public static void tearDown() throws Exception { @Test public void testOffline() throws Exception, InterruptedException { // Table should be after group table name so it gets assigned later. - final TableName failoverTable = TableName.valueOf(name.getMethodName()); + final TableName failoverTable = TableName.valueOf(getNameWithoutIndex(name.getMethodName())); TEST_UTIL.createTable(failoverTable, Bytes.toBytes("f")); final HRegionServer killRS = ((MiniHBaseCluster) cluster).getRegionServer(0); final HRegionServer groupRS = ((MiniHBaseCluster) cluster).getRegionServer(1); final HRegionServer failoverRS = ((MiniHBaseCluster) cluster).getRegionServer(2); String newGroup = "my_group"; - RSGroupAdmin groupAdmin = new RSGroupAdminClient(TEST_UTIL.getConnection()); + RSGroupAdminClient groupAdmin = new RSGroupAdminClient(TEST_UTIL.getConnection()); groupAdmin.addRSGroup(newGroup); if (master.getAssignmentManager().getRegionStates().getRegionAssignments() .containsValue(failoverRS.getServerName())) { @@ -140,7 +139,8 @@ public boolean evaluate() throws Exception { } }); // Move table to group and wait. - groupAdmin.moveTables(Sets.newHashSet(RSGroupInfoManager.RSGROUP_TABLE_NAME), newGroup); + groupAdmin.setRSGroupForTables(Sets.newHashSet(RSGroupInfoManagerImpl.RSGROUP_TABLE_NAME), + newGroup); LOG.info("Waiting for move table..."); TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { @Override @@ -168,9 +168,6 @@ public boolean evaluate() throws Exception { .getMasterCoprocessorHost().findCoprocessor(RSGroupAdminEndpoint.class).getGroupInfoManager(); // Make sure balancer is in offline mode, since this is what we're testing. assertFalse(groupMgr.isOnline()); - // Verify the group affiliation that's loaded from ZK instead of tables. 
- assertEquals(newGroup, groupMgr.getRSGroupOfTable(RSGroupInfoManager.RSGROUP_TABLE_NAME)); - assertEquals(RSGroupInfo.DEFAULT_GROUP, groupMgr.getRSGroupOfTable(failoverTable)); // Kill final regionserver to see the failover happens for all tables except GROUP table since // it's group does not have any online RS. killRS.stop("die"); @@ -182,7 +179,7 @@ public boolean evaluate() throws Exception { return failoverRS.getRegions(failoverTable).size() >= 1; } }); - Assert.assertEquals(0, failoverRS.getRegions(RSGroupInfoManager.RSGROUP_TABLE_NAME).size()); + Assert.assertEquals(0, failoverRS.getRegions(RSGroupInfoManagerImpl.RSGROUP_TABLE_NAME).size()); // Need this for minicluster to shutdown cleanly. master.stopMaster(); diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsWithACL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsWithACL.java similarity index 77% rename from hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsWithACL.java rename to hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsWithACL.java index 0278e3cfbfa1..215dca7db1f1 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsWithACL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsWithACL.java @@ -21,6 +21,8 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.fail; +import java.io.IOException; +import java.util.Optional; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -31,7 +33,12 @@ import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; +import org.apache.hadoop.hbase.coprocessor.HasMasterServices; +import org.apache.hadoop.hbase.ipc.RpcServer; +import 
org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.security.UserProvider; +import org.apache.hadoop.hbase.security.access.AccessChecker; import org.apache.hadoop.hbase.security.access.AccessControlClient; import org.apache.hadoop.hbase.security.access.Permission; import org.apache.hadoop.hbase.security.access.PermissionStorage; @@ -39,7 +46,6 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.SecurityTests; import org.apache.hadoop.hbase.util.Bytes; - import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.ClassRule; @@ -49,11 +55,11 @@ import org.slf4j.LoggerFactory; /** - * Performs authorization checks for rsgroup operations, according to different - * levels of authorized users. + * Performs authorization checks for rsgroup operations, according to different levels of authorized + * users. */ -@Category({SecurityTests.class, MediumTests.class}) -public class TestRSGroupsWithACL extends SecureTestUtil{ +@Category({ SecurityTests.class, MediumTests.class }) +public class TestRSGroupsWithACL extends SecureTestUtil { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -93,13 +99,15 @@ public class TestRSGroupsWithACL extends SecureTestUtil{ private static byte[] TEST_FAMILY = Bytes.toBytes("f1"); private static RSGroupAdminEndpoint rsGroupAdminEndpoint; + private static HMaster master; + private static AccessChecker accessChecker; + private static UserProvider userProvider; @BeforeClass public static void setupBeforeClass() throws Exception { // setup configuration conf = TEST_UTIL.getConfiguration(); - conf.set(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, - RSGroupBasedLoadBalancer.class.getName()); + conf.set(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, RSGroupBasedLoadBalancer.class.getName()); // Enable security enableSecurity(conf); // Verify enableSecurity sets up what we require @@ -108,8 +116,8 
@@ public static void setupBeforeClass() throws Exception { configureRSGroupAdminEndpoint(conf); TEST_UTIL.startMiniCluster(); - rsGroupAdminEndpoint = (RSGroupAdminEndpoint) TEST_UTIL.getMiniHBaseCluster().getMaster(). - getMasterCoprocessorHost().findCoprocessor(RSGroupAdminEndpoint.class.getName()); + rsGroupAdminEndpoint = (RSGroupAdminEndpoint) TEST_UTIL.getMiniHBaseCluster().getMaster() + .getMasterCoprocessorHost().findCoprocessor(RSGroupAdminEndpoint.class.getName()); // Wait for the ACL table to become available TEST_UTIL.waitUntilAllRegionsAssigned(PermissionStorage.ACL_TABLE_NAME); @@ -133,6 +141,22 @@ public static void setupBeforeClass() throws Exception { systemUserConnection = TEST_UTIL.getConnection(); setUpTableAndUserPermissions(); + master = TEST_UTIL.getHBaseCluster().getMaster(); + accessChecker = master.getAccessChecker(); + userProvider = UserProvider.instantiate(TEST_UTIL.getConfiguration()); + } + + private void checkPermission(String request) throws IOException { + accessChecker.requirePermission(getActiveUser(), request, null, Permission.Action.ADMIN); + } + + private User getActiveUser() throws IOException { + // for non-rpc handling, fallback to system user + Optional optionalUser = RpcServer.getRequestUser(); + if (optionalUser.isPresent()) { + return optionalUser.get(); + } + return userProvider.getCurrent(); } private static void setUpTableAndUserPermissions() throws Exception { @@ -141,31 +165,21 @@ private static void setUpTableAndUserPermissions() throws Exception { cfd.setMaxVersions(100); tableBuilder.setColumnFamily(cfd.build()); tableBuilder.setValue(TableDescriptorBuilder.OWNER, USER_OWNER.getShortName()); - createTable(TEST_UTIL, tableBuilder.build(), - new byte[][] { Bytes.toBytes("s") }); + createTable(TEST_UTIL, tableBuilder.build(), new byte[][] { Bytes.toBytes("s") }); // Set up initial grants - grantGlobal(TEST_UTIL, USER_ADMIN.getShortName(), - Permission.Action.ADMIN, - Permission.Action.CREATE, - 
Permission.Action.READ, - Permission.Action.WRITE); + grantGlobal(TEST_UTIL, USER_ADMIN.getShortName(), Permission.Action.ADMIN, + Permission.Action.CREATE, Permission.Action.READ, Permission.Action.WRITE); - grantOnTable(TEST_UTIL, USER_RW.getShortName(), - TEST_TABLE, TEST_FAMILY, null, - Permission.Action.READ, - Permission.Action.WRITE); + grantOnTable(TEST_UTIL, USER_RW.getShortName(), TEST_TABLE, TEST_FAMILY, null, + Permission.Action.READ, Permission.Action.WRITE); // USER_CREATE is USER_RW plus CREATE permissions - grantOnTable(TEST_UTIL, USER_CREATE.getShortName(), - TEST_TABLE, null, null, - Permission.Action.CREATE, - Permission.Action.READ, - Permission.Action.WRITE); + grantOnTable(TEST_UTIL, USER_CREATE.getShortName(), TEST_TABLE, null, null, + Permission.Action.CREATE, Permission.Action.READ, Permission.Action.WRITE); - grantOnTable(TEST_UTIL, USER_RO.getShortName(), - TEST_TABLE, TEST_FAMILY, null, - Permission.Action.READ); + grantOnTable(TEST_UTIL, USER_RO.getShortName(), TEST_TABLE, TEST_FAMILY, null, + Permission.Action.READ); grantGlobal(TEST_UTIL, toGroupEntry(GROUP_ADMIN), Permission.Action.ADMIN); grantGlobal(TEST_UTIL, toGroupEntry(GROUP_CREATE), Permission.Action.CREATE); @@ -174,8 +188,8 @@ private static void setUpTableAndUserPermissions() throws Exception { assertEquals(4, PermissionStorage.getTablePermissions(conf, TEST_TABLE).size()); try { - assertEquals(4, AccessControlClient.getUserPermissions(systemUserConnection, - TEST_TABLE.toString()).size()); + assertEquals(4, + AccessControlClient.getUserPermissions(systemUserConnection, TEST_TABLE.toString()).size()); } catch (AssertionError e) { fail(e.getMessage()); } catch (Throwable e) { @@ -210,14 +224,13 @@ private static void configureRSGroupAdminEndpoint(Configuration conf) { coprocessors += "," + currentCoprocessors; } conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, coprocessors); - conf.set(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, - 
RSGroupBasedLoadBalancer.class.getName()); + conf.set(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, RSGroupBasedLoadBalancer.class.getName()); } @Test public void testGetRSGroupInfo() throws Exception { AccessTestAction action = () -> { - rsGroupAdminEndpoint.checkPermission("getRSGroupInfo"); + checkPermission("getRSGroupInfo"); return null; }; @@ -227,7 +240,7 @@ public void testGetRSGroupInfo() throws Exception { @Test public void testGetRSGroupInfoOfTable() throws Exception { AccessTestAction action = () -> { - rsGroupAdminEndpoint.checkPermission("getRSGroupInfoOfTable"); + checkPermission("getRSGroupInfoOfTable"); return null; }; @@ -237,7 +250,7 @@ public void testGetRSGroupInfoOfTable() throws Exception { @Test public void testMoveServers() throws Exception { AccessTestAction action = () -> { - rsGroupAdminEndpoint.checkPermission("moveServers"); + checkPermission("moveServers"); return null; }; @@ -247,7 +260,7 @@ public void testMoveServers() throws Exception { @Test public void testMoveTables() throws Exception { AccessTestAction action = () -> { - rsGroupAdminEndpoint.checkPermission("moveTables"); + checkPermission("moveTables"); return null; }; @@ -257,7 +270,7 @@ public void testMoveTables() throws Exception { @Test public void testAddRSGroup() throws Exception { AccessTestAction action = () -> { - rsGroupAdminEndpoint.checkPermission("addRSGroup"); + checkPermission("addRSGroup"); return null; }; @@ -267,7 +280,7 @@ public void testAddRSGroup() throws Exception { @Test public void testRemoveRSGroup() throws Exception { AccessTestAction action = () -> { - rsGroupAdminEndpoint.checkPermission("removeRSGroup"); + checkPermission("removeRSGroup"); return null; }; @@ -277,7 +290,7 @@ public void testRemoveRSGroup() throws Exception { @Test public void testBalanceRSGroup() throws Exception { AccessTestAction action = () -> { - rsGroupAdminEndpoint.checkPermission("balanceRSGroup"); + checkPermission("balanceRSGroup"); return null; }; @@ -287,7 +300,7 @@ 
public void testBalanceRSGroup() throws Exception { @Test public void testListRSGroup() throws Exception { AccessTestAction action = () -> { - rsGroupAdminEndpoint.checkPermission("listRSGroup"); + checkPermission("listRSGroup"); return null; }; @@ -297,7 +310,7 @@ public void testListRSGroup() throws Exception { @Test public void testGetRSGroupInfoOfServer() throws Exception { AccessTestAction action = () -> { - rsGroupAdminEndpoint.checkPermission("getRSGroupInfoOfServer"); + checkPermission("getRSGroupInfoOfServer"); return null; }; @@ -307,7 +320,7 @@ public void testGetRSGroupInfoOfServer() throws Exception { @Test public void testMoveServersAndTables() throws Exception { AccessTestAction action = () -> { - rsGroupAdminEndpoint.checkPermission("moveServersAndTables"); + checkPermission("moveServersAndTables"); return null; }; @@ -317,7 +330,7 @@ public void testMoveServersAndTables() throws Exception { @Test public void testRemoveServers() throws Exception { AccessTestAction action = () -> { - rsGroupAdminEndpoint.checkPermission("removeServers"); + checkPermission("removeServers"); return null; }; @@ -326,7 +339,7 @@ public void testRemoveServers() throws Exception { private void validateAdminPermissions(AccessTestAction action) throws Exception { verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN); - verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, - USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); + verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, + USER_GROUP_WRITE, USER_GROUP_CREATE); } } diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java similarity index 56% rename from hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java rename to 
hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java index 88a43396d7b6..7bdabf00e351 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java @@ -17,18 +17,29 @@ */ package org.apache.hadoop.hbase.rsgroup; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + import java.io.ByteArrayInputStream; import java.io.IOException; +import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.Set; - +import java.util.SortedSet; +import java.util.regex.Pattern; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; @@ -38,22 +49,20 @@ import org.apache.hadoop.hbase.zookeeper.ZNodePaths; import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; -import org.junit.Assert; import org.apache.hbase.thirdparty.com.google.common.collect.Maps; import org.apache.hbase.thirdparty.com.google.common.collect.Sets; @InterfaceAudience.Private -public class VerifyingRSGroupAdminClient implements RSGroupAdmin { - private Table table; +public class VerifyingRSGroupAdminClient extends RSGroupAdminClient { + private Connection 
conn; private ZKWatcher zkw; - private RSGroupAdmin wrapped; + private RSGroupAdminClient wrapped; - public VerifyingRSGroupAdminClient(RSGroupAdmin RSGroupAdmin, Configuration conf) + public VerifyingRSGroupAdminClient(RSGroupAdminClient RSGroupAdmin, Configuration conf) throws IOException { wrapped = RSGroupAdmin; - table = ConnectionFactory.createConnection(conf) - .getTable(RSGroupInfoManager.RSGROUP_TABLE_NAME); + conn = ConnectionFactory.createConnection(conf); zkw = new ZKWatcher(conf, this.getClass().getSimpleName(), null); } @@ -79,12 +88,6 @@ public void moveServers(Set
servers, String targetGroup) throws IOExcep verify(); } - @Override - public void moveTables(Set tables, String targetGroup) throws IOException { - wrapped.moveTables(tables, targetGroup); - verify(); - } - @Override public void removeRSGroup(String name) throws IOException { wrapped.removeRSGroup(name); @@ -119,34 +122,72 @@ public void removeServers(Set
servers) throws IOException { verify(); } + @Override + public void setRSGroupForTables(Set tables, String groupName) throws IOException{ + wrapped.setRSGroupForTables(tables, groupName); + verify(); + } + public void verify() throws IOException { Map groupMap = Maps.newHashMap(); Set zList = Sets.newHashSet(); - - for (Result result : table.getScanner(new Scan())) { - RSGroupProtos.RSGroupInfo proto = - RSGroupProtos.RSGroupInfo.parseFrom( - result.getValue( - RSGroupInfoManager.META_FAMILY_BYTES, - RSGroupInfoManager.META_QUALIFIER_BYTES)); - groupMap.put(proto.getName(), RSGroupProtobufUtil.toGroupInfo(proto)); + List tds = new ArrayList<>(); + try (Admin admin = conn.getAdmin()) { + tds.addAll(admin.listTableDescriptors()); + tds.addAll(admin.listTableDescriptorsByNamespace(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME)); + } + SortedSet
lives = Sets.newTreeSet(); + for (ServerName sn : conn.getAdmin().getClusterMetrics().getLiveServerMetrics().keySet()) { + lives.add(sn.getAddress()); + } + for (ServerName sn : conn.getAdmin().listDecommissionedRegionServers()) { + lives.remove(sn.getAddress()); + } + try (Table table = conn.getTable(RSGroupInfoManagerImpl.RSGROUP_TABLE_NAME); + ResultScanner scanner = table.getScanner(new Scan())) { + for (;;) { + Result result = scanner.next(); + if (result == null) { + break; + } + RSGroupProtos.RSGroupInfo proto = RSGroupProtos.RSGroupInfo.parseFrom(result.getValue( + RSGroupInfoManagerImpl.META_FAMILY_BYTES, RSGroupInfoManagerImpl.META_QUALIFIER_BYTES)); + RSGroupInfo rsGroupInfo = ProtobufUtil.toGroupInfo(proto); + groupMap.put(proto.getName(), RSGroupUtil.fillTables(rsGroupInfo, tds)); + for(Address address : rsGroupInfo.getServers()){ + lives.remove(address); + } + } } - Assert.assertEquals(Sets.newHashSet(groupMap.values()), - Sets.newHashSet(wrapped.listRSGroups())); + SortedSet tables = Sets.newTreeSet(); + for (TableDescriptor td : conn.getAdmin().listTableDescriptors(Pattern.compile(".*"), + true)){ + String groupName = td.getRegionServerGroup().orElse(RSGroupInfo.DEFAULT_GROUP); + if (groupName.equals(RSGroupInfo.DEFAULT_GROUP)) { + tables.add(td.getTableName()); + } + } + + groupMap.put(RSGroupInfo.DEFAULT_GROUP, + new RSGroupInfo(RSGroupInfo.DEFAULT_GROUP, lives, tables)); + assertEquals(Sets.newHashSet(groupMap.values()), Sets.newHashSet(wrapped.listRSGroups())); try { String groupBasePath = ZNodePaths.joinZNode(zkw.getZNodePaths().baseZNode, "rsgroup"); - for(String znode: ZKUtil.listChildrenNoWatch(zkw, groupBasePath)) { + for (String znode : ZKUtil.listChildrenNoWatch(zkw, groupBasePath)) { byte[] data = ZKUtil.getData(zkw, ZNodePaths.joinZNode(groupBasePath, znode)); - if(data.length > 0) { + if (data.length > 0) { ProtobufUtil.expectPBMagicPrefix(data); - ByteArrayInputStream bis = new ByteArrayInputStream( - data, 
ProtobufUtil.lengthOfPBMagic(), data.length); - zList.add(RSGroupProtobufUtil.toGroupInfo(RSGroupProtos.RSGroupInfo.parseFrom(bis))); + ByteArrayInputStream bis = + new ByteArrayInputStream(data, ProtobufUtil.lengthOfPBMagic(), data.length); + RSGroupInfo rsGroupInfo = + ProtobufUtil.toGroupInfo(RSGroupProtos.RSGroupInfo.parseFrom(bis)); + zList.add(RSGroupUtil.fillTables(rsGroupInfo, tds)); } } - Assert.assertEquals(zList.size(), groupMap.size()); - for(RSGroupInfo RSGroupInfo : zList) { - Assert.assertTrue(groupMap.get(RSGroupInfo.getName()).equals(RSGroupInfo)); + groupMap.remove(RSGroupInfo.DEFAULT_GROUP); + assertEquals(zList.size(), groupMap.size()); + for (RSGroupInfo rsGroupInfo : zList) { + assertTrue(groupMap.get(rsGroupInfo.getName()).equals(rsGroupInfo)); } } catch (KeeperException e) { throw new IOException("ZK verification failed", e); diff --git a/hbase-shell/pom.xml b/hbase-shell/pom.xml index 7483da5968e0..a6b62ad5895d 100644 --- a/hbase-shell/pom.xml +++ b/hbase-shell/pom.xml @@ -187,41 +187,6 @@ - - rsgroup - - - !skip-rsgroup - - - - - org.apache.hbase - hbase-rsgroup - - - - - - org.codehaus.mojo - build-helper-maven-plugin - - - add-test-source - - add-test-source - - - - src/test/rsgroup - - - - - - - - skipShellTests diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java index d7aea33f991b..83ae4902c101 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java @@ -50,12 +50,14 @@ import org.apache.hadoop.hbase.client.replication.TableCFs; import org.apache.hadoop.hbase.client.security.SecurityCapability; import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; +import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.quotas.QuotaFilter; import 
org.apache.hadoop.hbase.quotas.QuotaSettings; import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot; import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; import org.apache.hadoop.hbase.replication.SyncReplicationState; +import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; import org.apache.hadoop.hbase.security.access.GetUserPermissionsRequest; import org.apache.hadoop.hbase.security.access.Permission; import org.apache.hadoop.hbase.security.access.UserPermission; @@ -1142,6 +1144,56 @@ public List hasUserPermissions(String userName, List permis throw new NotImplementedException("hasUserPermissions not supported in ThriftAdmin"); } + @Override + public RSGroupInfo getRSGroupInfo(String groupName) { + throw new NotImplementedException("getRSGroupInfo not supported in ThriftAdmin"); + } + + @Override + public void moveServers(Set
servers, String targetGroup) { + throw new NotImplementedException("moveServers not supported in ThriftAdmin"); + } + + @Override + public void addRSGroup(String groupName) { + throw new NotImplementedException("addRSGroup not supported in ThriftAdmin"); + } + + @Override + public void removeRSGroup(String groupName) { + throw new NotImplementedException("removeRSGroup not supported in ThriftAdmin"); + } + + @Override + public boolean balanceRSGroup(String groupName) { + throw new NotImplementedException("balanceRSGroup not supported in ThriftAdmin"); + } + + @Override + public List listRSGroups() { + throw new NotImplementedException("listRSGroups not supported in ThriftAdmin"); + } + + @Override + public RSGroupInfo getRSGroupOfServer(Address hostPort) { + throw new NotImplementedException("getRSGroupOfServer not supported in ThriftAdmin"); + } + + @Override + public void removeServers(Set
servers) { + throw new NotImplementedException("removeServers not supported in ThriftAdmin"); + } + + @Override + public RSGroupInfo getRSGroupInfoOfTable(TableName tableName) { + throw new NotImplementedException("getRSGroupInfoOfTable not supported in ThriftAdmin"); + } + + @Override + public void setRSGroupForTables(Set tables, String groupName) { + throw new NotImplementedException("setRSGroupForTables not supported in ThriftAdmin"); + } + @Override public Future splitRegionAsync(byte[] regionName) throws IOException { return splitRegionAsync(regionName, null); diff --git a/pom.xml b/pom.xml index 014d31d912ab..ec6437aa2633 100755 --- a/pom.xml +++ b/pom.xml @@ -1540,7 +1540,6 @@ hbase-procedure-${project.version}-tests.jar hbase-it-${project.version}-tests.jar hbase-annotations-${project.version}-tests.jar - hbase-rsgroup-${project.version}-tests.jar hbase-mapreduce-${project.version}-tests.jar hbase-zookeeper-${project.version}-tests.jar bash @@ -1670,18 +1669,6 @@ test-jar test - - hbase-rsgroup - org.apache.hbase - ${project.version} - - - hbase-rsgroup - org.apache.hbase - ${project.version} - test-jar - test - hbase-replication org.apache.hbase @@ -2315,17 +2302,6 @@ --> - - rsgroup - - - !skip-rsgroup - - - - hbase-rsgroup - - build-with-jdk8