diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
index 7422989d6aad2..8015de855d937 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
@@ -19,6 +19,7 @@
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.net.URI;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.LinkedHashMap;
@@ -34,6 +35,10 @@
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.viewfs.Constants;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -101,6 +106,9 @@ public class RouterAdmin extends Configured implements Tool {
   /** Pre-compiled regular expressions to detect duplicated slashes. */
   private static final Pattern SLASHES = Pattern.compile("/+");
 
+  // Parameter matching all clusters when initializing ViewFs mount points.
+  private static final String ALL_CLUSTERS = "allClusters";
+
   public static void main(String[] argv) throws Exception {
     Configuration conf = new HdfsConfiguration();
     RouterAdmin admin = new RouterAdmin(conf);
@@ -131,8 +139,8 @@ private String getUsage(String cmd) {
     String[] commands = {"-add", "-update", "-rm", "-ls", "-getDestination",
         "-setQuota", "-setStorageTypeQuota",
         "-clrQuota", "-clrStorageTypeQuota",
-        "-safemode", "-nameservice", "-getDisabledNameservices",
-        "-refresh", "-refreshRouterArgs",
+        "-initViewFsToMountTable", "-safemode", "-nameservice",
+        "-getDisabledNameservices", "-refresh", "-refreshRouterArgs",
         "-refreshSuperUserGroupsConfiguration"};
     StringBuilder usage = new StringBuilder();
     usage.append("Usage: hdfs dfsrouteradmin :\n");
@@ -171,7 +179,9 @@ private String getUsage(String cmd) {
       return "\t[-clrQuota <path>]";
     } else if (cmd.equals("-clrStorageTypeQuota")) {
       return "\t[-clrStorageTypeQuota <path>]";
-    } else if (cmd.equals("-safemode")) {
+    } else if (cmd.equals("-initViewFsToMountTable")) {
+      return "\t[-initViewFsToMountTable <clusterName> | allClusters]";
+    } else if (cmd.equals("-safemode")) {
       return "\t[-safemode enter | leave | get]";
     } else if (cmd.equals("-nameservice")) {
       return "\t[-nameservice enable | disable <nameservice>]";
@@ -242,6 +252,10 @@ private boolean validateMin(String[] argv) {
       if (argv.length < 2) {
         return false;
       }
+    } else if ("-initViewFsToMountTable".equals(cmd)) {
+      if (argv.length < 2) {
+        return false;
+      }
     } else if ("-getDestination".equals(cmd)) {
       if (argv.length < 2) {
         return false;
@@ -384,6 +398,15 @@ public int run(String[] argv) throws Exception {
         getDisabledNameservices();
       } else if ("-refresh".equals(cmd)) {
         refresh(address);
+      } else if ("-initViewFsToMountTable".equals(cmd)) {
+        if (initViewFsToMountTable(argv[i])) {
+          System.out.println("Successfully initialized ViewFs mapping "
+              + "to the mount table for " + argv[i]);
+        } else {
+          System.err.println(
+              "Failed to execute command -initViewFsToMountTable");
+          exitCode = -1;
+        }
       } else if ("-refreshRouterArgs".equals(cmd)) {
         exitCode = genericRefresh(argv, i);
("-refreshSuperUserGroupsConfiguration".equals(cmd)) { @@ -1036,6 +1059,83 @@ private boolean updateQuota(String mount, long nsQuota, long ssQuota) return updateResponse.getStatus(); } + /** + * Initialize the ViewFS mount point to the Router, + * either to specify a cluster or to initialize it all. + * @param clusterName The specified cluster to initialize, + * AllCluster was then all clusters. + * @return If the quota was updated. + * @throws IOException Error adding the mount point. + */ + public boolean initViewFsToMountTable(String clusterName) + throws IOException { + // fs.viewfs.mounttable.ClusterX.link./data + final String mountTablePrefix; + if (clusterName.equals(ALL_CLUSTERS)) { + mountTablePrefix = + Constants.CONFIG_VIEWFS_PREFIX + ".*" + + Constants.CONFIG_VIEWFS_LINK + "."; + } else { + mountTablePrefix = + Constants.CONFIG_VIEWFS_PREFIX + "." + clusterName + "." + + Constants.CONFIG_VIEWFS_LINK + "."; + } + final String rootPath = "/"; + Map viewFsMap = getConf().getValByRegex( + mountTablePrefix + rootPath); + if (viewFsMap.isEmpty()) { + System.out.println("There is no ViewFs mapping to initialize."); + return true; + } + for (Entry entry : viewFsMap.entrySet()) { + Path path = new Path(entry.getValue()); + URI destUri = path.toUri(); + String mountKey = entry.getKey(); + DestinationOrder order = DestinationOrder.HASH; + String mount = mountKey.replaceAll(mountTablePrefix, ""); + if (!destUri.getScheme().equals(HdfsConstants.HDFS_URI_SCHEME)) { + System.out.println("Only supports HDFS, " + + "added Mount Point failed , " + mountKey); + } + if (!mount.startsWith(rootPath) || + !destUri.getPath().startsWith(rootPath)) { + System.out.println("Added Mount Point failed " + mountKey); + continue; + } + String[] nss = new String[]{destUri.getAuthority()}; + boolean added = addMount( + mount, nss, destUri.getPath(), false, + false, order, getACLEntityFormHdfsPath(path, getConf())); + if (added) { + System.out.println("Added mount point " + mount); + } + } + return true; + } + + /** + * Returns ACLEntity according to a HDFS pat. + * @param path A path of HDFS. + */ + static private ACLEntity getACLEntityFormHdfsPath( + Path path, Configuration conf) { + String owner = null; + String group = null; + FsPermission mode = null; + try { + FileSystem fs = path.getFileSystem(conf); + if (fs.exists(path)) { + FileStatus fileStatus = fs.getFileStatus(path); + owner = fileStatus.getOwner(); + group = fileStatus.getGroup(); + mode = fileStatus.getPermission(); + } + } catch (IOException e) { + System.err.println("Exception encountered " + e); + } + return new ACLEntity(owner, group, mode); + } + /** * Update storage type quota of specified mount table. * diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md index d7838c75f3804..32230f744035b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md @@ -241,6 +241,26 @@ Mount table permission can be set by following command: The option mode is UNIX-style permissions for the mount table. Permissions are specified in octal, e.g. 0755. By default, this is set to 0755. +#### Init ViewFs To Router +Router supports initializing the [ViewFs](../hadoop-hdfs/ViewFs.html) mount point to the Router. The mapping directory protocol of ViewFS must be HDFS, and the initializer only supports one-to-one mapping. 
+
+For example, use the following [ViewFs](../hadoop-hdfs/ViewFs.html) configuration to initialize the Router mount table:
+
+    <configuration>
+      <property>
+        <name>fs.viewfs.mounttable.ClusterX.link./data</name>
+        <value>hdfs://nn1-clusterx.example.com:8020/data</value>
+      </property>
+      <property>
+        <name>fs.viewfs.mounttable.ClusterY.link./project</name>
+        <value>hdfs://nn1-clustery.example.com:8020/project</value>
+      </property>
+    </configuration>
+
+The [ViewFs](../hadoop-hdfs/ViewFs.html) mount table can be initialized into the Router by using the following command:
+
+    [hdfs]$ $HADOOP_HOME/bin/hdfs dfsrouteradmin -initViewFsToMountTable [<clusterName> | allClusters]
+
 #### Quotas
 Router-based federation supports global quota at mount table level. Mount table entries may spread multiple subclusters and the global quota will be accounted across these subclusters.
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
index 1daff053ed5a4..62579605bc4fd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.federation.router;
 
+import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_PREFIX;
 import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.createNamenodeReport;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
@@ -34,8 +35,10 @@
 import java.util.regex.Pattern;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.RouterContext;
 import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
@@ -78,6 +81,8 @@ public class TestRouterAdminCLI {
   private static RouterClient client;
   private static Router router;
 
+  private static DistributedFileSystem hdfs;
+
   private static final String TEST_USER = "test-user";
 
   private final ByteArrayOutputStream out = new ByteArrayOutputStream();
@@ -85,6 +90,24 @@ public class TestRouterAdminCLI {
   private static final PrintStream OLD_OUT = System.out;
   private static final PrintStream OLD_ERR = System.err;
 
+  // Used by the testInitViewFsToMountTable* tests.
+  private static final String BASEDIR = "/initViewFs";
+  private static final String SRC1 = BASEDIR + "/data1";
+  private static final String USER1 = "user1";
+  private static final String GROUP1 = "group1";
+  private static final String CLUSTER_NAME1 = "ClusterX";
+  private static Path destPath1;
+
+  private static final String SRC2 = BASEDIR + "/data2";
+  private static final String CLUSTER_NAME2 = "ClusterY";
+
+  private static final String SRC3 = BASEDIR + "/inExistent";
+  private static Path destPath3;
+
+  private static String nnAddress;
+
+
   @BeforeClass
   public static void globalSetUp() throws Exception {
     cluster = new StateStoreDFSCluster(false, 1,
@@ -102,10 +125,12 @@ public static void globalSetUp() throws Exception {
 
     // Start routers
     cluster.startRouters();
+    cluster.startCluster();
     routerContext = cluster.getRandomRouter();
     router = routerContext.getRouter();
     stateStore = router.getStateStore();
+    hdfs = cluster.getCluster().getFileSystem();
 
     Configuration routerConf = new Configuration();
     InetSocketAddress routerSocket = router.getAdminServerAddress();
@@ -700,6 +725,129 @@ public void testAddMountTableIfParentExist() throws Exception {
     }
   }
 
+  public void setInitViewFsToMountEnv() throws IOException {
+    nnAddress = cluster.getRandomNamenode()
+        .getNamenode().getHostAndPort();
+    destPath1 = new Path("hdfs://" + nnAddress + SRC1);
+    destPath3 = new Path("hdfs://" + nnAddress + SRC3);
+    hdfs.mkdirs(destPath1);
+    hdfs.setOwner(destPath1, USER1, GROUP1);
+    admin.getConf().set(CONFIG_VIEWFS_PREFIX + "."
+        + CLUSTER_NAME1 + ".link." + SRC1, destPath1.toString());
+    admin.getConf().set(CONFIG_VIEWFS_PREFIX + "."
+        + CLUSTER_NAME2 + ".link." + SRC2, destPath1.toString());
+  }
+
+  @Test
+  public void testInitViewFsToMountTableWithSpecificCluster()
+      throws Exception {
+    // re-set system out for testing
+    System.setOut(new PrintStream(out));
+    stateStore.loadCache(MountTableStoreImpl.class, true);
+    // 1. Initialize the environment
+    setInitViewFsToMountEnv();
+    // 2. Run the initialization for a specific cluster name
+    String[] argv = new String[]{"-initViewFsToMountTable", CLUSTER_NAME1};
+    assertEquals(0, ToolRunner.run(admin, argv));
+    // 3. Get the mount point entries
+    stateStore.loadCache(MountTableStoreImpl.class, true);
+    GetMountTableEntriesRequest getRequest = GetMountTableEntriesRequest
+        .newInstance(SRC1);
+    GetMountTableEntriesResponse getResponse = client.getMountTableManager()
+        .getMountTableEntries(getRequest);
+    List<MountTable> mountTables = getResponse.getEntries();
+    // 4. Checking
+    assertEquals(1, mountTables.size());
+    MountTable mountTable = mountTables.get(0);
+    List<RemoteLocation> destinations = mountTable.getDestinations();
+    assertEquals(1, destinations.size());
+    assertEquals(USER1, mountTable.getOwnerName());
+    assertEquals(GROUP1, mountTable.getGroupName());
+    assertEquals(destPath1.toUri().getPath(),
+        mountTable.getDestinations().get(0).getDest());
+    assertEquals(nnAddress,
+        mountTable.getDestinations().get(0).getNameserviceId());
+    assertEquals(SRC1, mountTable.getSourcePath());
+    // 5. Clean up
+    argv = new String[]{"-rm", SRC1};
+    assertEquals(0, ToolRunner.run(admin, argv));
+  }
+
+  @Test
+  public void testInitViewFsToMountTableWithAllCluster() throws Exception {
+    // re-set system out for testing
+    System.setOut(new PrintStream(out));
+    stateStore.loadCache(MountTableStoreImpl.class, true);
+    // 1. Initialize the environment
+    setInitViewFsToMountEnv();
+    // 2. Specify allClusters to initialize all the mappings
+    stateStore.loadCache(MountTableStoreImpl.class, true);
+    String[] argv = new String[]{"-initViewFsToMountTable", "allClusters"};
+    assertEquals(0, ToolRunner.run(admin, argv));
+    // 3. Get the mount point entries
+    stateStore.loadCache(MountTableStoreImpl.class, true);
+    GetMountTableEntriesRequest getRequest = GetMountTableEntriesRequest
+        .newInstance(BASEDIR);
+    GetMountTableEntriesResponse getResponse = client.getMountTableManager()
+        .getMountTableEntries(getRequest);
+    List<MountTable> mountTables = getResponse.getEntries();
+    assertEquals(2, mountTables.size());
+    // 4. Checking
+    for (MountTable mountTable1 : mountTables) {
+      List<RemoteLocation> destinations = mountTable1.getDestinations();
+      assertEquals(1, destinations.size());
+      assertEquals(USER1, mountTable1.getOwnerName());
+      assertEquals(GROUP1, mountTable1.getGroupName());
+      assertEquals(destPath1.toUri().getPath(),
+          mountTable1.getDestinations().get(0).getDest());
+      assertEquals(nnAddress,
+          mountTable1.getDestinations().get(0).getNameserviceId());
+    }
+    assertEquals(SRC1, mountTables.get(0).getSourcePath());
+    assertEquals(SRC2, mountTables.get(1).getSourcePath());
+    // 5. Clean up
+    argv = new String[]{"-rm", SRC1};
+    assertEquals(0, ToolRunner.run(admin, argv));
+    argv = new String[]{"-rm", SRC2};
+    assertEquals(0, ToolRunner.run(admin, argv));
+  }
+
+  @Test
+  public void testInitViewFsToMountTableMountNoExist() throws Exception {
+    // re-set system out for testing
+    System.setOut(new PrintStream(out));
+    stateStore.loadCache(MountTableStoreImpl.class, true);
+    // 1. Initialize the environment
+    setInitViewFsToMountEnv();
+    // The mount directory does not exist in this case
+    String clusterName3 = "ClusterZ";
+    admin.getConf().set(CONFIG_VIEWFS_PREFIX + "."
+        + clusterName3 + ".link." + SRC3, destPath3.toString());
+    // 2. Run the initialization for the specified cluster name
+    String[] argv = new String[]{"-initViewFsToMountTable", clusterName3};
+    assertEquals(0, ToolRunner.run(admin, argv));
+    // 3. Get the mount point entries
+    stateStore.loadCache(MountTableStoreImpl.class, true);
+    GetMountTableEntriesRequest getRequest = GetMountTableEntriesRequest
+        .newInstance(SRC3);
+    GetMountTableEntriesResponse getResponse = client.getMountTableManager()
+        .getMountTableEntries(getRequest);
+    List<MountTable> mountTables = getResponse.getEntries();
+    // 4. Checking
+    assertEquals(1, mountTables.size());
+    MountTable mountTable = mountTables.get(0);
+    List<RemoteLocation> destinations = mountTable.getDestinations();
+    assertEquals(1, destinations.size());
+    assertEquals(System.getProperty("user.name"), mountTable.getOwnerName());
+    assertEquals(destPath3.toUri().getPath(),
+        mountTable.getDestinations().get(0).getDest());
+    assertEquals(nnAddress,
+        mountTable.getDestinations().get(0).getNameserviceId());
+    assertEquals(SRC3, mountTable.getSourcePath());
+    // 5. Clean up
+    argv = new String[]{"-rm", SRC3};
+    assertEquals(0, ToolRunner.run(admin, argv));
+  }
+
   @Test
   public void testMountTablePermissions() throws Exception {
     // re-set system out for testing
@@ -810,6 +958,13 @@ public void testInvalidArgumentMessage() throws Exception {
     assertTrue(out.toString().contains("\t[-clrQuota <path>]"));
     out.reset();
 
+    argv = new String[] {"-initViewFsToMountTable"};
+    assertEquals(-1, ToolRunner.run(admin, argv));
+    assertTrue(out.toString()
+        .contains("[-initViewFsToMountTable <clusterName> | allClusters]"));
+    out.reset();
+
     argv = new String[] {"-safemode"};
     assertEquals(-1, ToolRunner.run(admin, argv));
     assertTrue(out.toString().contains("\t[-safemode enter | leave | get]"));
@@ -852,6 +1007,7 @@ public void testInvalidArgumentMessage() throws Exception {
         + " <quota in bytes or quota size string>]\n"
         + "\t[-clrQuota <path>]\n"
         + "\t[-clrStorageTypeQuota <path>]\n"
+        + "\t[-initViewFsToMountTable <clusterName> | allClusters]\n"
         + "\t[-safemode enter | leave | get]\n"
         + "\t[-nameservice enable | disable <nameservice>]\n"
         + "\t[-getDisabledNameservices]\n"