From ef407a8065b4e6a0889d90b5cbbb1b81a563d107 Mon Sep 17 00:00:00 2001
From: zhuxiangyi
Date: Thu, 6 May 2021 11:23:26 +0800
Subject: [PATCH 01/11] HDFS-16008. Initialize the ViewFS Mapping tool to the Router

---
 .../hdfs/tools/federation/RouterAdmin.java | 71 +++++++++++++++++--
 1 file changed, 67 insertions(+), 4 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
index 7422989d6aad2..3a993ee7aa62f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
@@ -34,6 +34,10 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.viewfs.Constants;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -132,7 +136,7 @@ private String getUsage(String cmd) {
 {"-add", "-update", "-rm", "-ls", "-getDestination", "-setQuota",
 "-setStorageTypeQuota", "-clrQuota", "-clrStorageTypeQuota",
 "-safemode", "-nameservice", "-getDisabledNameservices",
- "-refresh", "-refreshRouterArgs",
+ "-refresh", "-initViewFsToMountTable", "-refreshRouterArgs",
 "-refreshSuperUserGroupsConfiguration"};
 StringBuilder usage = new StringBuilder();
 usage.append("Usage: hdfs dfsrouteradmin :\n");
@@ -171,7 +175,10 @@ private String getUsage(String cmd) {
 return "\t[-clrQuota ]";
 } else if (cmd.equals("-clrStorageTypeQuota")) {
 return "\t[-clrStorageTypeQuota ]";
- } else if (cmd.equals("-safemode")) {
+ } else if (cmd.equals("-initViewFsToMountTable")) {
+ return "\t[-initViewFsToMountTable ," +
+ "-initViewFsToMountTable ClusterX]";
+ }else if (cmd.equals("-safemode")) {
 return "\t[-safemode enter | leave | get]";
 } else if (cmd.equals("-nameservice")) {
 return "\t[-nameservice enable | disable ]";
@@ -384,7 +391,14 @@ public int run(String[] argv) throws Exception {
 getDisabledNameservices();
 } else if ("-refresh".equals(cmd)) {
 refresh(address);
- } else if ("-refreshRouterArgs".equals(cmd)) {
+ } else if ("-initViewFsToMountTable".equals(cmd)) {
+ if (initViewFsToMountTable(argv, i)) {
+ System.out.println("Successfully init ViewFs mapping to router " + argv[i]);
+ } else {
+ exitCode = -1;
+ }
+ }
+ else if ("-refreshRouterArgs".equals(cmd)) {
 exitCode = genericRefresh(argv, i);
 } else if ("-refreshSuperUserGroupsConfiguration".equals(cmd)) {
 exitCode = refreshSuperUserGroupsConfiguration();
@@ -1035,7 +1049,56 @@ private boolean updateQuota(String mount, long nsQuota, long ssQuota)
 .updateMountTableEntry(updateRequest);
 return updateResponse.getStatus();
 }
-
+
+ /**
+ * initViewFsToMountTable.
+ *
+ * @param parameters The specified cluster to initialize.
+ * @param i Index in the parameters
+ * @return If the quota was updated.
+ * @throws IOException Error adding the mount point.
+ */ + public boolean initViewFsToMountTable(String[] parameters, int i) throws IOException { + String clusterName = parameters[i++]; + if (clusterName == null) { + System.out.println("Please enter the cluster name."); + return false; + } + final String mountTablePrefix = + Constants.CONFIG_VIEWFS_PREFIX + "." + clusterName + "." + + Constants.CONFIG_VIEWFS_LINK + "./"; + Map viewFsMap = getConf().getValByRegex(mountTablePrefix); + if (viewFsMap.size() == 0) { + System.out.println("Please check the cluster name and veiwfs " + + "configuration."); + } + for (String key : viewFsMap.keySet()) { + Path path = new Path(viewFsMap.get(key)); + String owner = null; + String group = null; + FsPermission mode = null; + try { + FileSystem fs = path.getFileSystem(getConf()); + if (fs.exists(path)) { + FileStatus fileStatus = fs.getFileStatus(path); + owner = fileStatus.getOwner(); + group = fileStatus.getGroup(); + mode = fileStatus.getPermission(); + } + } catch (Exception e) { + LOG.warn("Exception encountered", e); + } + DestinationOrder order = DestinationOrder.HASH; + String mount = + key.split(clusterName + "." + Constants.CONFIG_VIEWFS_LINK + ".")[1]; + String dest = path.toUri().getPath(); + String[] nss = new String[]{path.toUri().getAuthority()}; + addMount(mount, nss, dest, false, false, order, + new ACLEntity(owner, group, mode)); + } + return true; + } + /** * Update storage type quota of specified mount table. * From 9a7c9f1cee89bd5ee8def106078b925b5d0f65b3 Mon Sep 17 00:00:00 2001 From: zhuxiangyi Date: Thu, 6 May 2021 17:39:42 +0800 Subject: [PATCH 02/11] Address comments and add unit tests --- .../hdfs/tools/federation/RouterAdmin.java | 87 +++++++++++-------- .../federation/router/TestRouterAdminCLI.java | 47 ++++++++++ 2 files changed, 98 insertions(+), 36 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java index 3a993ee7aa62f..6efc0f5ac7c34 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java @@ -249,6 +249,10 @@ private boolean validateMin(String[] argv) { if (argv.length < 2) { return false; } + } else if ("-initViewFsToMountTable".equals(cmd)) { + if (argv.length < 2) { + return false; + } } else if ("-getDestination".equals(cmd)) { if (argv.length < 2) { return false; @@ -392,8 +396,9 @@ public int run(String[] argv) throws Exception { } else if ("-refresh".equals(cmd)) { refresh(address); } else if ("-initViewFsToMountTable".equals(cmd)) { - if (initViewFsToMountTable(argv, i)) { - System.out.println("Successfully init ViewFs mapping to router " + argv[i]); + if (initViewFsToMountTable(argv[i])) { + System.out.println("Successfully init ViewFs mapping to router " + + argv[i]); } else { exitCode = -1; } @@ -1052,53 +1057,63 @@ private boolean updateQuota(String mount, long nsQuota, long ssQuota) /** * initViewFsToMountTable. - * - * @param parameters The specified cluster to initialize. - * @param i Index in the parameters + * @param clusterName The specified cluster to initialize. * @return If the quota was updated. * @throws IOException Error adding the mount point. 
*/ - public boolean initViewFsToMountTable(String[] parameters, int i) throws IOException { - String clusterName = parameters[i++]; - if (clusterName == null) { - System.out.println("Please enter the cluster name."); - return false; - } + public boolean initViewFsToMountTable(String clusterName) + throws IOException { + // fs.viewfs.mounttable.ClusterX.link./data final String mountTablePrefix = Constants.CONFIG_VIEWFS_PREFIX + "." + clusterName + "." + Constants.CONFIG_VIEWFS_LINK + "./"; Map viewFsMap = getConf().getValByRegex(mountTablePrefix); - if (viewFsMap.size() == 0) { - System.out.println("Please check the cluster name and veiwfs " + - "configuration."); + if (viewFsMap.isEmpty()) { + System.out.println("There is no ViewFs mapping to initialize."); + return true; } - for (String key : viewFsMap.keySet()) { - Path path = new Path(viewFsMap.get(key)); - String owner = null; - String group = null; - FsPermission mode = null; - try { - FileSystem fs = path.getFileSystem(getConf()); - if (fs.exists(path)) { - FileStatus fileStatus = fs.getFileStatus(path); - owner = fileStatus.getOwner(); - group = fileStatus.getGroup(); - mode = fileStatus.getPermission(); - } - } catch (Exception e) { - LOG.warn("Exception encountered", e); - } + for (Entry entry : viewFsMap.entrySet()) { + Path path = new Path(entry.getValue()); DestinationOrder order = DestinationOrder.HASH; - String mount = - key.split(clusterName + "." + Constants.CONFIG_VIEWFS_LINK + ".")[1]; - String dest = path.toUri().getPath(); + String[] mount = entry.getKey().split( + clusterName + "." + Constants.CONFIG_VIEWFS_LINK + "."); + if (mount.length < 2) { + System.out.println("Added Mount Point failed " + entry.getKey()); + continue; + } String[] nss = new String[]{path.toUri().getAuthority()}; - addMount(mount, nss, dest, false, false, order, - new ACLEntity(owner, group, mode)); + boolean added = addMount( + mount[1], nss, path.toUri().getPath(), false, + false, order, getACLEntityFormHdfsPath(path)); + if (added) { + System.out.println("added mount point " + mount[1]); + } } return true; } - + + /** + * Returns ACLEntity according to a HDFS pat. + * @param path A path of HDFS. + */ + public ACLEntity getACLEntityFormHdfsPath(Path path){ + String owner = null; + String group = null; + FsPermission mode = null; + try { + FileSystem fs = path.getFileSystem(getConf()); + if (fs.exists(path)) { + FileStatus fileStatus = fs.getFileStatus(path); + owner = fileStatus.getOwner(); + group = fileStatus.getGroup(); + mode = fileStatus.getPermission(); + } + } catch (IOException e) { + System.out.println("Exception encountered " + e); + } + return new ACLEntity(owner, group, mode); + } + /** * Update storage type quota of specified mount table. 
* diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java index 1daff053ed5a4..fbfe5130ca59a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java @@ -34,8 +34,11 @@ import java.util.regex.Pattern; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; +import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.RouterContext; import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder; @@ -78,6 +81,8 @@ public class TestRouterAdminCLI { private static RouterClient client; private static Router router; + private static DistributedFileSystem hdfs; + private static final String TEST_USER = "test-user"; private final ByteArrayOutputStream out = new ByteArrayOutputStream(); @@ -102,10 +107,12 @@ public static void globalSetUp() throws Exception { // Start routers cluster.startRouters(); + cluster.startCluster(); routerContext = cluster.getRandomRouter(); router = routerContext.getRouter(); stateStore = router.getStateStore(); + hdfs = cluster.getCluster().getFileSystem(); Configuration routerConf = new Configuration(); InetSocketAddress routerSocket = router.getAdminServerAddress(); @@ -700,6 +707,46 @@ public void testAddMountTableIfParentExist() throws Exception { } } + @Test + public void testInitViewFsToMountTable() throws Exception { + // re-set system out for testing + System.setOut(new PrintStream(out)); + stateStore.loadCache(MountTableStoreImpl.class, true); + String nnAddress = cluster.getRandomNamenode().getNamenode().getHostAndPort(); + + String src = "/data"; + Path destPath = new Path("hdfs://" + nnAddress + "/data"); + String user = "user1"; + String group = "group1"; + String clusterName = "ClusterX"; + + // 0.mkdir destPath + hdfs.mkdirs(destPath); + // 1.set owner + hdfs.setOwner(destPath, user, group); + // 2.set viewFs mapping + admin.getConf().set("fs.viewfs.mounttable.ClusterX.link." + src, destPath.toString()); + // 3.run initialization + String[] argv = new String[]{"-initViewFsToMountTable", clusterName}; + assertEquals(0, ToolRunner.run(admin, argv)); + // 4.gets the mount point entries + stateStore.loadCache(MountTableStoreImpl.class, true); + GetMountTableEntriesRequest getRequest = GetMountTableEntriesRequest + .newInstance("/"); + GetMountTableEntriesResponse getResponse = client.getMountTableManager() + .getMountTableEntries(getRequest); + List mountTables = getResponse.getEntries(); + // 5.check + assertEquals(1, mountTables.size()); + assertEquals(user, mountTables.get(0).getOwnerName()); + assertEquals(group, mountTables.get(0).getGroupName()); + assertEquals(destPath.toUri().getPath(), mountTables.get(0). + getDestinations().get(0).getDest()); + assertEquals(nnAddress, mountTables.get(0). 
+ getDestinations().get(0).getNameserviceId()); + assertEquals(src, mountTables.get(0).getSourcePath()); + } + @Test public void testMountTablePermissions() throws Exception { // re-set system out for testing From e0b806f234d7217010f9ac1d804f08b77025bc70 Mon Sep 17 00:00:00 2001 From: zhuxiangyi Date: Fri, 7 May 2021 14:22:36 +0800 Subject: [PATCH 03/11] Address comments and add doc --- .../hdfs/tools/federation/RouterAdmin.java | 46 +++++++++++-------- .../src/site/markdown/HDFSRouterFederation.md | 16 +++++++ .../federation/router/TestRouterAdminCLI.java | 28 +++++++---- 3 files changed, 62 insertions(+), 28 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java index 6efc0f5ac7c34..00449f9e3d00e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java @@ -19,6 +19,7 @@ import java.io.IOException; import java.net.InetSocketAddress; +import java.net.URI; import java.util.Arrays; import java.util.Collection; import java.util.LinkedHashMap; @@ -135,8 +136,8 @@ private String getUsage(String cmd) { String[] commands = {"-add", "-update", "-rm", "-ls", "-getDestination", "-setQuota", "-setStorageTypeQuota", "-clrQuota", "-clrStorageTypeQuota", - "-safemode", "-nameservice", "-getDisabledNameservices", - "-refresh", "-initViewFsToMountTable", "-refreshRouterArgs", + "-initViewFsToMountTable", "-safemode", "-nameservice", + "-getDisabledNameservices", "-refresh", "-refreshRouterArgs", "-refreshSuperUserGroupsConfiguration"}; StringBuilder usage = new StringBuilder(); usage.append("Usage: hdfs dfsrouteradmin :\n"); @@ -176,8 +177,7 @@ private String getUsage(String cmd) { } else if (cmd.equals("-clrStorageTypeQuota")) { return "\t[-clrStorageTypeQuota ]"; } else if (cmd.equals("-initViewFsToMountTable")) { - return "\t[-initViewFsToMountTable ," + - "-initViewFsToMountTable ClusterX]"; + return "\t[-initViewFsToMountTable ]"; }else if (cmd.equals("-safemode")) { return "\t[-safemode enter | leave | get]"; } else if (cmd.equals("-nameservice")) { @@ -402,8 +402,7 @@ public int run(String[] argv) throws Exception { } else { exitCode = -1; } - } - else if ("-refreshRouterArgs".equals(cmd)) { + } else if ("-refreshRouterArgs".equals(cmd)) { exitCode = genericRefresh(argv, i); } else if ("-refreshSuperUserGroupsConfiguration".equals(cmd)) { exitCode = refreshSuperUserGroupsConfiguration(); @@ -1066,27 +1065,35 @@ public boolean initViewFsToMountTable(String clusterName) // fs.viewfs.mounttable.ClusterX.link./data final String mountTablePrefix = Constants.CONFIG_VIEWFS_PREFIX + "." + clusterName + "." + - Constants.CONFIG_VIEWFS_LINK + "./"; - Map viewFsMap = getConf().getValByRegex(mountTablePrefix); + Constants.CONFIG_VIEWFS_LINK + "."; + final String rootPath = "/"; + Map viewFsMap = getConf().getValByRegex( + mountTablePrefix + rootPath); if (viewFsMap.isEmpty()) { System.out.println("There is no ViewFs mapping to initialize."); return true; } for (Entry entry : viewFsMap.entrySet()) { Path path = new Path(entry.getValue()); + URI destUri = path.toUri(); + String mountKey = entry.getKey(); DestinationOrder order = DestinationOrder.HASH; - String[] mount = entry.getKey().split( - clusterName + "." 
+ Constants.CONFIG_VIEWFS_LINK + "."); - if (mount.length < 2) { - System.out.println("Added Mount Point failed " + entry.getKey()); + String mount = mountKey.replaceAll(mountTablePrefix, ""); + if (!destUri.getScheme().equals("hdfs")) { + System.out.println("Only supports HDFS, " + + "added Mount Point failed , " + mountKey); + } + if (!mount.startsWith(rootPath) || + !destUri.getPath().startsWith(rootPath)) { + System.out.println("Added Mount Point failed " + mountKey); continue; } - String[] nss = new String[]{path.toUri().getAuthority()}; + String[] nss = new String[]{destUri.getAuthority()}; boolean added = addMount( - mount[1], nss, path.toUri().getPath(), false, - false, order, getACLEntityFormHdfsPath(path)); + mount, nss, destUri.getPath(), false, + false, order, getACLEntityFormHdfsPath(path, getConf())); if (added) { - System.out.println("added mount point " + mount[1]); + System.out.println("Added mount point " + mount); } } return true; @@ -1096,12 +1103,13 @@ public boolean initViewFsToMountTable(String clusterName) * Returns ACLEntity according to a HDFS pat. * @param path A path of HDFS. */ - public ACLEntity getACLEntityFormHdfsPath(Path path){ + static public ACLEntity getACLEntityFormHdfsPath( + Path path, Configuration conf) { String owner = null; String group = null; FsPermission mode = null; try { - FileSystem fs = path.getFileSystem(getConf()); + FileSystem fs = path.getFileSystem(conf); if (fs.exists(path)) { FileStatus fileStatus = fs.getFileStatus(path); owner = fileStatus.getOwner(); @@ -1109,7 +1117,7 @@ public ACLEntity getACLEntityFormHdfsPath(Path path){ mode = fileStatus.getPermission(); } } catch (IOException e) { - System.out.println("Exception encountered " + e); + System.err.println("Exception encountered " + e); } return new ACLEntity(owner, group, mode); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md index d7838c75f3804..55b84d1a593f1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md @@ -241,6 +241,22 @@ Mount table permission can be set by following command: The option mode is UNIX-style permissions for the mount table. Permissions are specified in octal, e.g. 0755. By default, this is set to 0755. +#### Init ViewFs To Router +Router supports initializing the ViewFS mount point to the Router. The mapping directory protocol of ViewFS must be HDFS, and the initializer only supports one-to-one mapping. + +For example, use the following viewfs to configure the initial mount table to the router. + + + + fs.viewfs.mounttable.ClusterX.link./data + hdfs://nn1-clusterx.example.com:8020/data + + + +The ViewFS mount table can be initialized to the Router by using the following command: + + [hdfs]$ $HADOOP_HOME/bin/hdfs dfsrouteradmin -initViewFsToMountTable ClusterX + #### Quotas Router-based federation supports global quota at mount table level. Mount table entries may spread multiple subclusters and the global quota will be accounted across these subclusters. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java index fbfe5130ca59a..095dc2562b555 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java @@ -34,7 +34,6 @@ import java.util.regex.Pattern; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; @@ -712,7 +711,8 @@ public void testInitViewFsToMountTable() throws Exception { // re-set system out for testing System.setOut(new PrintStream(out)); stateStore.loadCache(MountTableStoreImpl.class, true); - String nnAddress = cluster.getRandomNamenode().getNamenode().getHostAndPort(); + String nnAddress = cluster.getRandomNamenode(). + getNamenode().getHostAndPort(); String src = "/data"; Path destPath = new Path("hdfs://" + nnAddress + "/data"); @@ -725,26 +725,28 @@ public void testInitViewFsToMountTable() throws Exception { // 1.set owner hdfs.setOwner(destPath, user, group); // 2.set viewFs mapping - admin.getConf().set("fs.viewfs.mounttable.ClusterX.link." + src, destPath.toString()); + admin.getConf().set( + "fs.viewfs.mounttable.ClusterX.link." + src, destPath.toString()); // 3.run initialization String[] argv = new String[]{"-initViewFsToMountTable", clusterName}; assertEquals(0, ToolRunner.run(admin, argv)); // 4.gets the mount point entries stateStore.loadCache(MountTableStoreImpl.class, true); GetMountTableEntriesRequest getRequest = GetMountTableEntriesRequest - .newInstance("/"); + .newInstance(src); GetMountTableEntriesResponse getResponse = client.getMountTableManager() .getMountTableEntries(getRequest); List mountTables = getResponse.getEntries(); // 5.check assertEquals(1, mountTables.size()); - assertEquals(user, mountTables.get(0).getOwnerName()); - assertEquals(group, mountTables.get(0).getGroupName()); - assertEquals(destPath.toUri().getPath(), mountTables.get(0). + MountTable mountTable = mountTables.get(0); + assertEquals(user, mountTable.getOwnerName()); + assertEquals(group, mountTable.getGroupName()); + assertEquals(destPath.toUri().getPath(), mountTable. getDestinations().get(0).getDest()); - assertEquals(nnAddress, mountTables.get(0). + assertEquals(nnAddress, mountTable. getDestinations().get(0).getNameserviceId()); - assertEquals(src, mountTables.get(0).getSourcePath()); + assertEquals(src, mountTable.getSourcePath()); } @Test @@ -857,6 +859,13 @@ public void testInvalidArgumentMessage() throws Exception { assertTrue(out.toString().contains("\t[-clrQuota ]")); out.reset(); + argv = new String[] {"-initViewFsToMountTable"}; + assertEquals(-1, ToolRunner.run(admin, argv)); + System.err.println(out.toString()); + assertTrue(out.toString(). 
+ contains("[-initViewFsToMountTable ]")); + out.reset(); + argv = new String[] {"-safemode"}; assertEquals(-1, ToolRunner.run(admin, argv)); assertTrue(out.toString().contains("\t[-safemode enter | leave | get]")); @@ -899,6 +908,7 @@ public void testInvalidArgumentMessage() throws Exception { + " ]\n" + "\t[-clrQuota ]\n" + "\t[-clrStorageTypeQuota ]\n" + +"\t[-initViewFsToMountTable ]\n" + "\t[-safemode enter | leave | get]\n" + "\t[-nameservice enable | disable ]\n" + "\t[-getDisabledNameservices]\n" From 7fa083ac87f720663afb7c52953c830e45cefd4a Mon Sep 17 00:00:00 2001 From: zhuxiangyi Date: Fri, 7 May 2021 16:14:18 +0800 Subject: [PATCH 04/11] fix checkstyle of blanks --- .../org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java index 00449f9e3d00e..c98f1637a6f6e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java @@ -1053,7 +1053,7 @@ private boolean updateQuota(String mount, long nsQuota, long ssQuota) .updateMountTableEntry(updateRequest); return updateResponse.getStatus(); } - + /** * initViewFsToMountTable. * @param clusterName The specified cluster to initialize. From 3be7673a4f31d82f0e2669a1e2d16d132b8d80ab Mon Sep 17 00:00:00 2001 From: zhuxiangyi Date: Sat, 8 May 2021 15:38:59 +0800 Subject: [PATCH 05/11] Address comments --- .../src/site/markdown/HDFSRouterFederation.md | 6 +++--- .../hdfs/server/federation/router/TestRouterAdminCLI.java | 4 +++- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md index 55b84d1a593f1..bde035174a00f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md @@ -242,9 +242,9 @@ Mount table permission can be set by following command: The option mode is UNIX-style permissions for the mount table. Permissions are specified in octal, e.g. 0755. By default, this is set to 0755. #### Init ViewFs To Router -Router supports initializing the ViewFS mount point to the Router. The mapping directory protocol of ViewFS must be HDFS, and the initializer only supports one-to-one mapping. +Router supports initializing the [ViewFs](../hadoop-hdfs/ViewFs.html) mount point to the Router. The mapping directory protocol of ViewFS must be HDFS, and the initializer only supports one-to-one mapping. -For example, use the following viewfs to configure the initial mount table to the router. +For example, use the following [ViewFs](../hadoop-hdfs/ViewFs.html) to configure the initial mount table to the router. 
@@ -253,7 +253,7 @@ For example, use the following viewfs to configure the initial mount table to th -The ViewFS mount table can be initialized to the Router by using the following command: +The [ViewFs](../hadoop-hdfs/ViewFs.html) mount table can be initialized to the Router by using the following command: [hdfs]$ $HADOOP_HOME/bin/hdfs dfsrouteradmin -initViewFsToMountTable ClusterX diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java index 095dc2562b555..9f9758b431662 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java @@ -740,6 +740,8 @@ public void testInitViewFsToMountTable() throws Exception { // 5.check assertEquals(1, mountTables.size()); MountTable mountTable = mountTables.get(0); + List destinations = mountTable.getDestinations(); + assertEquals(1, destinations.size()); assertEquals(user, mountTable.getOwnerName()); assertEquals(group, mountTable.getGroupName()); assertEquals(destPath.toUri().getPath(), mountTable. @@ -908,7 +910,7 @@ public void testInvalidArgumentMessage() throws Exception { + " ]\n" + "\t[-clrQuota ]\n" + "\t[-clrStorageTypeQuota ]\n" - +"\t[-initViewFsToMountTable ]\n" + + "\t[-initViewFsToMountTable ]\n" + "\t[-safemode enter | leave | get]\n" + "\t[-nameservice enable | disable ]\n" + "\t[-getDisabledNameservices]\n" From c33ba431ae357c5ac891bbe8e90140cea4243c9f Mon Sep 17 00:00:00 2001 From: zhuxiangyi Date: Tue, 11 May 2021 13:58:47 +0800 Subject: [PATCH 06/11] Address comments: support for all clusters --- .../hdfs/tools/federation/RouterAdmin.java | 15 ++- .../src/site/markdown/HDFSRouterFederation.md | 6 +- .../federation/router/TestRouterAdminCLI.java | 105 ++++++++++++++---- 3 files changed, 102 insertions(+), 24 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java index c98f1637a6f6e..2fd3e85aceea6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java @@ -177,7 +177,7 @@ private String getUsage(String cmd) { } else if (cmd.equals("-clrStorageTypeQuota")) { return "\t[-clrStorageTypeQuota ]"; } else if (cmd.equals("-initViewFsToMountTable")) { - return "\t[-initViewFsToMountTable ]"; + return "\t[-initViewFsToMountTable ] | allClusters"; }else if (cmd.equals("-safemode")) { return "\t[-safemode enter | leave | get]"; } else if (cmd.equals("-nameservice")) { @@ -1063,9 +1063,16 @@ private boolean updateQuota(String mount, long nsQuota, long ssQuota) public boolean initViewFsToMountTable(String clusterName) throws IOException { // fs.viewfs.mounttable.ClusterX.link./data - final String mountTablePrefix = - Constants.CONFIG_VIEWFS_PREFIX + "." + clusterName + "." 
+ - Constants.CONFIG_VIEWFS_LINK + "."; + final String mountTablePrefix; + if (clusterName.equals("allClusters")) { + mountTablePrefix = + Constants.CONFIG_VIEWFS_PREFIX + ".*" + + Constants.CONFIG_VIEWFS_LINK + "."; + } else { + mountTablePrefix = + Constants.CONFIG_VIEWFS_PREFIX + "." + clusterName + "." + + Constants.CONFIG_VIEWFS_LINK + "."; + } final String rootPath = "/"; Map viewFsMap = getConf().getValByRegex( mountTablePrefix + rootPath); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md index bde035174a00f..cfbc84ea21034 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md @@ -251,11 +251,15 @@ For example, use the following [ViewFs](../hadoop-hdfs/ViewFs.html) to configure fs.viewfs.mounttable.ClusterX.link./data hdfs://nn1-clusterx.example.com:8020/data + + fs.viewfs.mounttable.ClusterY.link./project + hdfs://nn1-clustery.example.com:8020/project + The [ViewFs](../hadoop-hdfs/ViewFs.html) mount table can be initialized to the Router by using the following command: - [hdfs]$ $HADOOP_HOME/bin/hdfs dfsrouteradmin -initViewFsToMountTable ClusterX + [hdfs]$ $HADOOP_HOME/bin/hdfs dfsrouteradmin -initViewFsToMountTable ] | allClusters #### Quotas Router-based federation supports global quota at mount table level. Mount table entries may spread multiple subclusters and the global quota will be diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java index 9f9758b431662..a750013135491 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hdfs.server.federation.router; +import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_PREFIX; import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.createNamenodeReport; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; @@ -713,42 +714,108 @@ public void testInitViewFsToMountTable() throws Exception { stateStore.loadCache(MountTableStoreImpl.class, true); String nnAddress = cluster.getRandomNamenode(). getNamenode().getHostAndPort(); + String baseDir = "/initViewFs"; + String src1 = baseDir + "/data1"; + Path destPath1 = new Path("hdfs://" + nnAddress + src1); + String user1 = "user1"; + String group1 = "group1"; + String clusterName1 = "ClusterX"; - String src = "/data"; - Path destPath = new Path("hdfs://" + nnAddress + "/data"); - String user = "user1"; - String group = "group1"; - String clusterName = "ClusterX"; + String src2 = baseDir + "/data2"; + String clusterName2 = "ClusterY"; + + String src3 = baseDir + "/inExistent"; + Path destPath3 = new Path("hdfs://" + nnAddress + src3); + String clusterName3 = "ClusterZ"; // 0.mkdir destPath - hdfs.mkdirs(destPath); + hdfs.mkdirs(destPath1); // 1.set owner - hdfs.setOwner(destPath, user, group); + hdfs.setOwner(destPath1, user1, group1); // 2.set viewFs mapping - admin.getConf().set( - "fs.viewfs.mounttable.ClusterX.link." 
+ src, destPath.toString()); - // 3.run initialization - String[] argv = new String[]{"-initViewFsToMountTable", clusterName}; + // Use different clusterName and mount points + admin.getConf().set(CONFIG_VIEWFS_PREFIX + "." + + clusterName1 + ".link." + src1, destPath1.toString()); + admin.getConf().set(CONFIG_VIEWFS_PREFIX + "." + + clusterName2 + ".link." + src2, destPath1.toString()); + + // 3.run initialization,Specify a ClusterName + String[] argv = new String[]{"-initViewFsToMountTable", clusterName1}; assertEquals(0, ToolRunner.run(admin, argv)); // 4.gets the mount point entries stateStore.loadCache(MountTableStoreImpl.class, true); GetMountTableEntriesRequest getRequest = GetMountTableEntriesRequest - .newInstance(src); + .newInstance(src1); GetMountTableEntriesResponse getResponse = client.getMountTableManager() .getMountTableEntries(getRequest); List mountTables = getResponse.getEntries(); - // 5.check + // 5.Checking assertEquals(1, mountTables.size()); MountTable mountTable = mountTables.get(0); List destinations = mountTable.getDestinations(); assertEquals(1, destinations.size()); - assertEquals(user, mountTable.getOwnerName()); - assertEquals(group, mountTable.getGroupName()); - assertEquals(destPath.toUri().getPath(), mountTable. + assertEquals(user1, mountTable.getOwnerName()); + assertEquals(group1, mountTable.getGroupName()); + assertEquals(destPath1.toUri().getPath(), mountTable. getDestinations().get(0).getDest()); assertEquals(nnAddress, mountTable. getDestinations().get(0).getNameserviceId()); - assertEquals(src, mountTable.getSourcePath()); + assertEquals(src1, mountTable.getSourcePath()); + + // Specify allCluster to initialize all mappings + argv = new String[]{"-rm", src1}; + assertEquals(0, ToolRunner.run(admin, argv)); + stateStore.loadCache(MountTableStoreImpl.class, true); + argv = new String[]{"-initViewFsToMountTable", "allClusters"}; + assertEquals(0, ToolRunner.run(admin, argv)); + + stateStore.loadCache(MountTableStoreImpl.class, true); + getRequest = GetMountTableEntriesRequest + .newInstance(baseDir); + getResponse = client.getMountTableManager() + .getMountTableEntries(getRequest); + mountTables = getResponse.getEntries(); + assertEquals(2, mountTables.size()); + for (MountTable mountTable1 : mountTables) { + mountTable1 = mountTables.get(0); + destinations = mountTable1.getDestinations(); + assertEquals(1, destinations.size()); + assertEquals(user1, mountTable1.getOwnerName()); + assertEquals(group1, mountTable1.getGroupName()); + assertEquals(destPath1.toUri().getPath(), mountTable1. + getDestinations().get(0).getDest()); + assertEquals(nnAddress, mountTable1. + getDestinations().get(0).getNameserviceId()); + assertEquals(src1, mountTable1.getSourcePath()); + } + // When the mount directory does not exist + admin.getConf().set(CONFIG_VIEWFS_PREFIX + "." + + clusterName3 + ".link." 
+ src3, destPath3.toString()); + // set user + UserGroupInformation userA = UserGroupInformation.createUserForTesting( + TEST_USER, new String[]{TEST_USER}); + UserGroupInformation.setLoginUser(userA); + argv = new String[]{"-initViewFsToMountTable", clusterName3}; + assertEquals(0, ToolRunner.run(admin, argv)); + + stateStore.loadCache(MountTableStoreImpl.class, true); + getRequest = GetMountTableEntriesRequest + .newInstance(src3); + getResponse = client.getMountTableManager() + .getMountTableEntries(getRequest); + mountTables = getResponse.getEntries(); + // Checking + assertEquals(1, mountTables.size()); + mountTable = mountTables.get(0); + destinations = mountTable.getDestinations(); + assertEquals(1, destinations.size()); + assertEquals(TEST_USER, mountTable.getOwnerName()); + assertEquals(TEST_USER, mountTable.getGroupName()); + assertEquals(destPath3.toUri().getPath(), mountTable. + getDestinations().get(0).getDest()); + assertEquals(nnAddress, mountTable. + getDestinations().get(0).getNameserviceId()); + assertEquals(src3, mountTable.getSourcePath()); } @Test @@ -865,7 +932,7 @@ public void testInvalidArgumentMessage() throws Exception { assertEquals(-1, ToolRunner.run(admin, argv)); System.err.println(out.toString()); assertTrue(out.toString(). - contains("[-initViewFsToMountTable ]")); + contains("[-initViewFsToMountTable ] | allClusters")); out.reset(); argv = new String[] {"-safemode"}; @@ -910,7 +977,7 @@ public void testInvalidArgumentMessage() throws Exception { + " ]\n" + "\t[-clrQuota ]\n" + "\t[-clrStorageTypeQuota ]\n" - + "\t[-initViewFsToMountTable ]\n" + + "\t[-initViewFsToMountTable ] | allClusters\n" + "\t[-safemode enter | leave | get]\n" + "\t[-nameservice enable | disable ]\n" + "\t[-getDisabledNameservices]\n" From 698a3a94ada4348e981cc7ad6e76d213fad4e04f Mon Sep 17 00:00:00 2001 From: zhuxiangyi Date: Wed, 12 May 2021 22:54:18 +0800 Subject: [PATCH 07/11] Address comments --- .../hdfs/tools/federation/RouterAdmin.java | 11 +- .../federation/router/TestRouterAdminCLI.java | 162 +++++++++++------- 2 files changed, 104 insertions(+), 69 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java index 2fd3e85aceea6..f83f76b4cd405 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java @@ -106,6 +106,9 @@ public class RouterAdmin extends Configured implements Tool { /** Pre-compiled regular expressions to detect duplicated slashes. */ private static final Pattern SLASHES = Pattern.compile("/+"); + // Parameter matching when initializing ViewFs mount point. + private static final String ALL_CLUSTERS = "allClusters"; + public static void main(String[] argv) throws Exception { Configuration conf = new HdfsConfiguration(); RouterAdmin admin = new RouterAdmin(conf); @@ -1055,8 +1058,10 @@ private boolean updateQuota(String mount, long nsQuota, long ssQuota) } /** - * initViewFsToMountTable. - * @param clusterName The specified cluster to initialize. + * Initialize the ViewFS mount point to the Router, + * either to specify a cluster or to initialize it all. + * @param clusterName The specified cluster to initialize, + * AllCluster was then all clusters. * @return If the quota was updated. 
* @throws IOException Error adding the mount point. */ @@ -1064,7 +1069,7 @@ public boolean initViewFsToMountTable(String clusterName) throws IOException { // fs.viewfs.mounttable.ClusterX.link./data final String mountTablePrefix; - if (clusterName.equals("allClusters")) { + if (clusterName.equals(ALL_CLUSTERS)) { mountTablePrefix = Constants.CONFIG_VIEWFS_PREFIX + ".*" + Constants.CONFIG_VIEWFS_LINK + "."; diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java index a750013135491..d3a9ddb263784 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java @@ -90,6 +90,24 @@ public class TestRouterAdminCLI { private static final PrintStream OLD_OUT = System.out; private static final PrintStream OLD_ERR = System.err; + // testInitViewFsToMountTable use + private static final String BASEDIR = "/initViewFs"; + private static final String SRC1 = BASEDIR + "/data1"; + private static final String USER1 = "user1"; + private static final String GROUP1 = "group1"; + private static final String CLUSTER_NAME1 = "ClusterX"; + private static Path destPath1; + + private static final String SRC2 = BASEDIR + "/data2"; + private static final String CLUSTER_NAME2 = "ClusterY"; + + private static final String SRC3 = BASEDIR + "/inExistent"; + private static Path destPath3; + + private static String nnAddress; + + + @BeforeClass public static void globalSetUp() throws Exception { cluster = new StateStoreDFSCluster(false, 1, @@ -707,115 +725,127 @@ public void testAddMountTableIfParentExist() throws Exception { } } - @Test - public void testInitViewFsToMountTable() throws Exception { - // re-set system out for testing - System.setOut(new PrintStream(out)); - stateStore.loadCache(MountTableStoreImpl.class, true); - String nnAddress = cluster.getRandomNamenode(). + public void setInitViewFsToMountEnv() throws IOException { + nnAddress = cluster.getRandomNamenode(). getNamenode().getHostAndPort(); - String baseDir = "/initViewFs"; - String src1 = baseDir + "/data1"; - Path destPath1 = new Path("hdfs://" + nnAddress + src1); - String user1 = "user1"; - String group1 = "group1"; - String clusterName1 = "ClusterX"; - - String src2 = baseDir + "/data2"; - String clusterName2 = "ClusterY"; - - String src3 = baseDir + "/inExistent"; - Path destPath3 = new Path("hdfs://" + nnAddress + src3); - String clusterName3 = "ClusterZ"; - - // 0.mkdir destPath + destPath1 = new Path("hdfs://" + nnAddress + SRC1); + destPath3 = new Path("hdfs://" + nnAddress + SRC3); hdfs.mkdirs(destPath1); - // 1.set owner - hdfs.setOwner(destPath1, user1, group1); - // 2.set viewFs mapping - // Use different clusterName and mount points + hdfs.setOwner(destPath1, USER1, GROUP1); admin.getConf().set(CONFIG_VIEWFS_PREFIX + "." + - clusterName1 + ".link." + src1, destPath1.toString()); + CLUSTER_NAME1 + ".link." + SRC1, destPath1.toString()); admin.getConf().set(CONFIG_VIEWFS_PREFIX + "." + - clusterName2 + ".link." + src2, destPath1.toString()); + CLUSTER_NAME1 + ".link." 
+ SRC2, destPath1.toString()); + } - // 3.run initialization,Specify a ClusterName - String[] argv = new String[]{"-initViewFsToMountTable", clusterName1}; + @Test + public void testInitViewFsToMountTableWithSpecificCluster() throws Exception { + // re-set system out for testing + System.setOut(new PrintStream(out)); + stateStore.loadCache(MountTableStoreImpl.class, true); + // 1.Initialize the environment + setInitViewFsToMountEnv(); + // 2.Run initialization,Specify a ClusterName + String[] argv = new String[]{"-initViewFsToMountTable", CLUSTER_NAME1}; assertEquals(0, ToolRunner.run(admin, argv)); - // 4.gets the mount point entries + // 3.Gets the mount point entries stateStore.loadCache(MountTableStoreImpl.class, true); GetMountTableEntriesRequest getRequest = GetMountTableEntriesRequest - .newInstance(src1); + .newInstance(SRC1); GetMountTableEntriesResponse getResponse = client.getMountTableManager() .getMountTableEntries(getRequest); List mountTables = getResponse.getEntries(); - // 5.Checking + // 4.Checking assertEquals(1, mountTables.size()); MountTable mountTable = mountTables.get(0); List destinations = mountTable.getDestinations(); assertEquals(1, destinations.size()); - assertEquals(user1, mountTable.getOwnerName()); - assertEquals(group1, mountTable.getGroupName()); + assertEquals(USER1, mountTable.getOwnerName()); + assertEquals(GROUP1, mountTable.getGroupName()); assertEquals(destPath1.toUri().getPath(), mountTable. getDestinations().get(0).getDest()); assertEquals(nnAddress, mountTable. getDestinations().get(0).getNameserviceId()); - assertEquals(src1, mountTable.getSourcePath()); - - // Specify allCluster to initialize all mappings - argv = new String[]{"-rm", src1}; + assertEquals(SRC1, mountTable.getSourcePath()); + // 5.Clear up + argv = new String[]{"-rm", SRC1}; assertEquals(0, ToolRunner.run(admin, argv)); + } + + @Test + public void testInitViewFsToMountTableWithAllCluster() throws Exception { + // re-set system out for testing + System.setOut(new PrintStream(out)); stateStore.loadCache(MountTableStoreImpl.class, true); - argv = new String[]{"-initViewFsToMountTable", "allClusters"}; + // 1.Initialize the environment + setInitViewFsToMountEnv(); + // 2.Specify allCluster to initialize all mappings + stateStore.loadCache(MountTableStoreImpl.class, true); + String[] argv = new String[]{"-initViewFsToMountTable", "allClusters"}; assertEquals(0, ToolRunner.run(admin, argv)); - + // 3.Gets the mount point entries stateStore.loadCache(MountTableStoreImpl.class, true); - getRequest = GetMountTableEntriesRequest - .newInstance(baseDir); - getResponse = client.getMountTableManager() + GetMountTableEntriesRequest getRequest = GetMountTableEntriesRequest + .newInstance(BASEDIR); + GetMountTableEntriesResponse getResponse = client.getMountTableManager() .getMountTableEntries(getRequest); - mountTables = getResponse.getEntries(); + List mountTables = getResponse.getEntries(); assertEquals(2, mountTables.size()); + // 3.Checking for (MountTable mountTable1 : mountTables) { - mountTable1 = mountTables.get(0); - destinations = mountTable1.getDestinations(); + List destinations = mountTable1.getDestinations(); assertEquals(1, destinations.size()); - assertEquals(user1, mountTable1.getOwnerName()); - assertEquals(group1, mountTable1.getGroupName()); + assertEquals(USER1, mountTable1.getOwnerName()); + assertEquals(GROUP1, mountTable1.getGroupName()); assertEquals(destPath1.toUri().getPath(), mountTable1. getDestinations().get(0).getDest()); assertEquals(nnAddress, mountTable1. 
getDestinations().get(0).getNameserviceId()); - assertEquals(src1, mountTable1.getSourcePath()); } + assertEquals(SRC1, mountTables.get(0).getSourcePath()); + assertEquals(SRC2, mountTables.get(1).getSourcePath()); + // 5.Clear up + argv = new String[]{"-rm", SRC1}; + assertEquals(0, ToolRunner.run(admin, argv)); + argv = new String[]{"-rm", SRC2}; + assertEquals(0, ToolRunner.run(admin, argv)); + } + + @Test + public void testInitViewFsToMountTableMountNoExist() throws Exception { + // re-set system out for testing + System.setOut(new PrintStream(out)); + stateStore.loadCache(MountTableStoreImpl.class, true); + // 1.Initialize the environment + setInitViewFsToMountEnv(); // When the mount directory does not exist + String clusterName3 = "ClusterZ"; admin.getConf().set(CONFIG_VIEWFS_PREFIX + "." + - clusterName3 + ".link." + src3, destPath3.toString()); - // set user - UserGroupInformation userA = UserGroupInformation.createUserForTesting( - TEST_USER, new String[]{TEST_USER}); - UserGroupInformation.setLoginUser(userA); - argv = new String[]{"-initViewFsToMountTable", clusterName3}; + clusterName3 + ".link." + SRC3, destPath3.toString()); + // 2.Run initialization,Specify a ClusterName + String[] argv = new String[]{"-initViewFsToMountTable", clusterName3}; assertEquals(0, ToolRunner.run(admin, argv)); - + // 3.Gets the mount point entries stateStore.loadCache(MountTableStoreImpl.class, true); - getRequest = GetMountTableEntriesRequest - .newInstance(src3); - getResponse = client.getMountTableManager() + GetMountTableEntriesRequest getRequest = GetMountTableEntriesRequest + .newInstance(SRC3); + GetMountTableEntriesResponse getResponse = client.getMountTableManager() .getMountTableEntries(getRequest); - mountTables = getResponse.getEntries(); - // Checking + List mountTables = getResponse.getEntries(); + // 4.Checking assertEquals(1, mountTables.size()); - mountTable = mountTables.get(0); - destinations = mountTable.getDestinations(); + MountTable mountTable = mountTables.get(0); + List destinations = mountTable.getDestinations(); assertEquals(1, destinations.size()); - assertEquals(TEST_USER, mountTable.getOwnerName()); - assertEquals(TEST_USER, mountTable.getGroupName()); + assertEquals(System.getProperty("user.name"), mountTable.getOwnerName()); assertEquals(destPath3.toUri().getPath(), mountTable. getDestinations().get(0).getDest()); assertEquals(nnAddress, mountTable. 
getDestinations().get(0).getNameserviceId()); - assertEquals(src3, mountTable.getSourcePath()); + assertEquals(SRC3, mountTable.getSourcePath()); + // 5.Clear up + argv = new String[]{"-rm", SRC3}; + assertEquals(0, ToolRunner.run(admin, argv)); } @Test From 33701470f5825366a00169e05cb02c0cc54a6bab Mon Sep 17 00:00:00 2001 From: zhuxiangyi Date: Thu, 13 May 2021 12:39:50 +0800 Subject: [PATCH 08/11] Address comments --- .../apache/hadoop/hdfs/tools/federation/RouterAdmin.java | 7 ++++--- .../hdfs/server/federation/router/TestRouterAdminCLI.java | 6 +++--- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java index f83f76b4cd405..7445328ba1b43 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java @@ -180,7 +180,7 @@ private String getUsage(String cmd) { } else if (cmd.equals("-clrStorageTypeQuota")) { return "\t[-clrStorageTypeQuota ]"; } else if (cmd.equals("-initViewFsToMountTable")) { - return "\t[-initViewFsToMountTable ] | allClusters"; + return "\t[-initViewFsToMountTable | allClusters]"; }else if (cmd.equals("-safemode")) { return "\t[-safemode enter | leave | get]"; } else if (cmd.equals("-nameservice")) { @@ -403,6 +403,7 @@ public int run(String[] argv) throws Exception { System.out.println("Successfully init ViewFs mapping to router " + argv[i]); } else { + System.err.println("Failed when execute command initViewFsToMountTable"); exitCode = -1; } } else if ("-refreshRouterArgs".equals(cmd)) { @@ -1091,7 +1092,7 @@ public boolean initViewFsToMountTable(String clusterName) String mountKey = entry.getKey(); DestinationOrder order = DestinationOrder.HASH; String mount = mountKey.replaceAll(mountTablePrefix, ""); - if (!destUri.getScheme().equals("hdfs")) { + if (!destUri.getScheme().equals(HdfsConstants.HDFS_URI_SCHEME)) { System.out.println("Only supports HDFS, " + "added Mount Point failed , " + mountKey); } @@ -1115,7 +1116,7 @@ public boolean initViewFsToMountTable(String clusterName) * Returns ACLEntity according to a HDFS pat. * @param path A path of HDFS. */ - static public ACLEntity getACLEntityFormHdfsPath( + static private ACLEntity getACLEntityFormHdfsPath( Path path, Configuration conf) { String owner = null; String group = null; diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java index d3a9ddb263784..62579605bc4fd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java @@ -735,7 +735,7 @@ public void setInitViewFsToMountEnv() throws IOException { admin.getConf().set(CONFIG_VIEWFS_PREFIX + "." + CLUSTER_NAME1 + ".link." + SRC1, destPath1.toString()); admin.getConf().set(CONFIG_VIEWFS_PREFIX + "." + - CLUSTER_NAME1 + ".link." + SRC2, destPath1.toString()); + CLUSTER_NAME2 + ".link." 
+ SRC2, destPath1.toString()); } @Test @@ -962,7 +962,7 @@ public void testInvalidArgumentMessage() throws Exception { assertEquals(-1, ToolRunner.run(admin, argv)); System.err.println(out.toString()); assertTrue(out.toString(). - contains("[-initViewFsToMountTable ] | allClusters")); + contains("[-initViewFsToMountTable | allClusters]")); out.reset(); argv = new String[] {"-safemode"}; @@ -1007,7 +1007,7 @@ public void testInvalidArgumentMessage() throws Exception { + " ]\n" + "\t[-clrQuota ]\n" + "\t[-clrStorageTypeQuota ]\n" - + "\t[-initViewFsToMountTable ] | allClusters\n" + + "\t[-initViewFsToMountTable | allClusters]\n" + "\t[-safemode enter | leave | get]\n" + "\t[-nameservice enable | disable ]\n" + "\t[-getDisabledNameservices]\n" From 93dbc90ef7d3c3ee2fee43a84706248a2f3db9b3 Mon Sep 17 00:00:00 2001 From: zhuxiangyi Date: Thu, 13 May 2021 14:08:32 +0800 Subject: [PATCH 09/11] Address comments --- .../hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md index cfbc84ea21034..76ab0b570d594 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md @@ -259,7 +259,7 @@ For example, use the following [ViewFs](../hadoop-hdfs/ViewFs.html) to configure The [ViewFs](../hadoop-hdfs/ViewFs.html) mount table can be initialized to the Router by using the following command: - [hdfs]$ $HADOOP_HOME/bin/hdfs dfsrouteradmin -initViewFsToMountTable ] | allClusters + [hdfs]$ $HADOOP_HOME/bin/hdfs dfsrouteradmin -initViewFsToMountTable | allClusters] #### Quotas Router-based federation supports global quota at mount table level. Mount table entries may spread multiple subclusters and the global quota will be From 0204cdcfe14b729a8a189379b3e6e2cefc28f843 Mon Sep 17 00:00:00 2001 From: zhuxiangyi Date: Thu, 13 May 2021 14:13:28 +0800 Subject: [PATCH 10/11] Fix doc formatting errors --- .../hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md index 76ab0b570d594..32230f744035b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md @@ -259,7 +259,7 @@ For example, use the following [ViewFs](../hadoop-hdfs/ViewFs.html) to configure The [ViewFs](../hadoop-hdfs/ViewFs.html) mount table can be initialized to the Router by using the following command: - [hdfs]$ $HADOOP_HOME/bin/hdfs dfsrouteradmin -initViewFsToMountTable | allClusters] + [hdfs]$ $HADOOP_HOME/bin/hdfs dfsrouteradmin -initViewFsToMountTable [ | allClusters] #### Quotas Router-based federation supports global quota at mount table level. 
Mount table entries may spread multiple subclusters and the global quota will be From bc5089e4eaf5828f5ddb48cbc6e69219ad18702c Mon Sep 17 00:00:00 2001 From: zhuxiangyi Date: Thu, 13 May 2021 14:48:52 +0800 Subject: [PATCH 11/11] fix checkstyle --- .../org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java index 7445328ba1b43..8015de855d937 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java @@ -403,7 +403,8 @@ public int run(String[] argv) throws Exception { System.out.println("Successfully init ViewFs mapping to router " + argv[i]); } else { - System.err.println("Failed when execute command initViewFsToMountTable"); + System.err.println( + "Failed when execute command initViewFsToMountTable"); exitCode = -1; } } else if ("-refreshRouterArgs".equals(cmd)) {
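For reference, the sketch below is an illustrative summary and is not part of the patch series above. Assuming a plain Hadoop Configuration pre-loaded with fs.viewfs.mounttable.* link entries, it previews the mount table entries that the new `hdfs dfsrouteradmin -initViewFsToMountTable clusterName | allClusters` command derives from ViewFs links, mirroring the key-scanning and URI-splitting logic of RouterAdmin#initViewFsToMountTable without contacting a Router or NameNode. The class name ViewFsLinkPreview and the preview() method are hypothetical; the Hadoop types and Constants fields are the ones the patches themselves use.

    import java.net.URI;
    import java.util.Map;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.viewfs.Constants;

    /**
     * Illustrative sketch: prints the mount entries that
     * -initViewFsToMountTable would derive for a given cluster name
     * (or "allClusters"). It does not talk to the Router.
     */
    public final class ViewFsLinkPreview {

      /** Print source path, nameservice and destination for each ViewFs link. */
      public static void preview(Configuration conf, String clusterName) {
        // Keys look like: fs.viewfs.mounttable.ClusterX.link./data
        String cluster = "allClusters".equals(clusterName) ? ".*" : clusterName;
        String prefix = Constants.CONFIG_VIEWFS_PREFIX + "." + cluster + "."
            + Constants.CONFIG_VIEWFS_LINK + ".";
        Map<String, String> links = conf.getValByRegex(prefix + "/");
        for (Map.Entry<String, String> e : links.entrySet()) {
          URI dest = new Path(e.getValue()).toUri();
          if (!"hdfs".equals(dest.getScheme())) {
            // Only HDFS destinations are supported, per HDFSRouterFederation.md.
            continue;
          }
          String mountPoint = e.getKey().replaceAll(prefix, "");
          System.out.println("mount " + mountPoint
              + " -> nameservice=" + dest.getAuthority()
              + " dest=" + dest.getPath());
        }
      }

      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("fs.viewfs.mounttable.ClusterX.link./data",
            "hdfs://nn1-clusterx.example.com:8020/data");
        preview(conf, "ClusterX");
      }
    }

Run against the example configuration from the documentation hunk above, this prints a single entry mapping /data to destination path /data on nameservice nn1-clusterx.example.com:8020, which is also what testInitViewFsToMountTable asserts through the mount table manager.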