diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHttpServer.java
index b1fcc0c6b4c0b..85044399f9815 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHttpServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHttpServer.java
@@ -125,6 +125,9 @@ private static void setupServlets(
         RouterFsckServlet.PATH_SPEC,
         RouterFsckServlet.class,
         true);
+    httpServer.addInternalServlet(RouterNetworkTopologyServlet.SERVLET_NAME,
+        RouterNetworkTopologyServlet.PATH_SPEC,
+        RouterNetworkTopologyServlet.class);
   }
 
   public InetSocketAddress getHttpAddress() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNetworkTopologyServlet.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNetworkTopologyServlet.java
new file mode 100644
index 0000000000000..e517066c81c20
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNetworkTopologyServlet.java
@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.router;
+
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.namenode.NetworkTopologyServlet;
+import org.apache.hadoop.net.Node;
+import org.apache.hadoop.util.StringUtils;
+
+import javax.servlet.ServletContext;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * A servlet to print out the network topology from the router.
+ */
+public class RouterNetworkTopologyServlet extends NetworkTopologyServlet {
+
+  @Override
+  public void doGet(HttpServletRequest request, HttpServletResponse response)
+      throws IOException {
+    final ServletContext context = getServletContext();
+
+    String format = parseAcceptHeader(request);
+    if (FORMAT_TEXT.equals(format)) {
+      response.setContentType("text/plain; charset=UTF-8");
+    } else if (FORMAT_JSON.equals(format)) {
+      response.setContentType("application/json; charset=UTF-8");
+    }
+
+    Router router = RouterHttpServer.getRouterFromContext(context);
+    DatanodeInfo[] datanodeReport =
+        router.getRpcServer().getDatanodeReport(
+            HdfsConstants.DatanodeReportType.ALL);
+    List<Node> datanodeInfos = Arrays.asList(datanodeReport);
+
+    try (PrintStream out = new PrintStream(
+        response.getOutputStream(), false, "UTF-8")) {
+      printTopology(out, datanodeInfos, format);
+    } catch (Throwable t) {
+      String errMsg = "Print network topology failed. "
+          + StringUtils.stringifyException(t);
+      response.sendError(HttpServletResponse.SC_GONE, errMsg);
+      throw new IOException(errMsg);
+    } finally {
+      response.getOutputStream().close();
+    }
+  }
+}
\ No newline at end of file
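
For reference, the endpoint registered above can be queried with any HTTP client. A minimal Java sketch follows (illustrative only, not part of the patch; the host and port are placeholders assuming the default router HTTP address, and the Accept header mirrors the parseAcceptHeader handling inherited from NetworkTopologyServlet):

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;

    public class TopologyEndpointSketch {
      public static void main(String[] args) throws Exception {
        // Placeholder router address; 50071 is only the default HTTP port.
        URL url = new URL("http://router-host:50071/topology");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        // Send "application/json" for the JSON format; omit the header
        // (or send "text/plain") for the plain-text format.
        conn.setRequestProperty("Accept", "application/json");
        try (BufferedReader reader = new BufferedReader(
            new InputStreamReader(conn.getInputStream(),
                StandardCharsets.UTF_8))) {
          String line;
          while ((line = reader.readLine()) != null) {
            System.out.println(line);
          }
        } finally {
          conn.disconnect();
        }
      }
    }
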
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.html b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.html
index 3150d87bdce62..42386d3c30c11 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.html
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.html
@@ -48,6 +48,7 @@
           <li><a href="jmx">Metrics</a></li>
           <li><a href="conf">Configuration</a></li>
           <li><a href="stacks">Process Thread Dump</a></li>
+          <li><a href="topology">Network Topology</a></li>
         </ul>
       </li>
     </ul>
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html
index eca395ff4b2a9..80b4b3b39f903 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html
@@ -52,6 +52,7 @@
           <li><a href="jmx">Metrics</a></li>
           <li><a href="conf">Configuration</a></li>
           <li><a href="stacks">Process Thread Dump</a></li>
+          <li><a href="topology">Network Topology</a></li>
         </ul>
       </li>
     </ul>
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNetworkTopologyServlet.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNetworkTopologyServlet.java
new file mode 100644
index 0000000000000..e120c69007ee5
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNetworkTopologyServlet.java
@@ -0,0 +1,210 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.router;
+
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
+import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster;
+import org.apache.hadoop.hdfs.server.federation.resolver.MultipleDestinationMountTableResolver;
+import org.apache.hadoop.io.IOUtils;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.ByteArrayOutputStream;
+import java.net.HttpURLConnection;
+import java.net.URL;
+import java.util.Iterator;
+import java.util.Map;
+
+import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_HTTP_ENABLE;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+public class TestRouterNetworkTopologyServlet {
+
+  private static StateStoreDFSCluster clusterWithDatanodes;
+  private static StateStoreDFSCluster clusterNoDatanodes;
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    // Builder configuration
+    Configuration routerConf =
+        new RouterConfigBuilder().stateStore().admin().quota().rpc().build();
+    routerConf.set(DFS_ROUTER_HTTP_ENABLE, "true");
+    Configuration hdfsConf = new Configuration(false);
+
+    // Build and start a federated cluster with datanodes
+    clusterWithDatanodes = new StateStoreDFSCluster(false, 2,
+        MultipleDestinationMountTableResolver.class);
+    clusterWithDatanodes.addNamenodeOverrides(hdfsConf);
+    clusterWithDatanodes.addRouterOverrides(routerConf);
+    clusterWithDatanodes.setNumDatanodesPerNameservice(9);
+    clusterWithDatanodes.setIndependentDNs();
+    clusterWithDatanodes.setRacks(
+        new String[] {"/rack1", "/rack1", "/rack1", "/rack2", "/rack2",
+            "/rack2", "/rack3", "/rack3", "/rack3", "/rack4", "/rack4",
+            "/rack4", "/rack5", "/rack5", "/rack5", "/rack6", "/rack6",
+            "/rack6"});
+    clusterWithDatanodes.startCluster();
+    clusterWithDatanodes.startRouters();
+    clusterWithDatanodes.waitClusterUp();
+    clusterWithDatanodes.waitActiveNamespaces();
+
+    // Build and start a federated cluster without datanodes
+    clusterNoDatanodes = new StateStoreDFSCluster(false, 2,
+        MultipleDestinationMountTableResolver.class);
+    clusterNoDatanodes.addNamenodeOverrides(hdfsConf);
+    clusterNoDatanodes.addRouterOverrides(routerConf);
+    clusterNoDatanodes.setNumDatanodesPerNameservice(0);
+    clusterNoDatanodes.setIndependentDNs();
+    clusterNoDatanodes.startCluster();
+    clusterNoDatanodes.startRouters();
+    clusterNoDatanodes.waitClusterUp();
+    clusterNoDatanodes.waitActiveNamespaces();
+  }
+
+  @Test
+  public void testPrintTopologyTextFormat() throws Exception {
+    // get http address
+    String httpAddress = clusterWithDatanodes.getRandomRouter().getRouter()
+        .getHttpServerAddress().toString();
+
+    // send http request
+    URL url = new URL("http:/" + httpAddress + "/topology");
+    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+    conn.setReadTimeout(20000);
+    conn.setConnectTimeout(20000);
+    conn.connect();
+
+    ByteArrayOutputStream out = new ByteArrayOutputStream();
+    IOUtils.copyBytes(conn.getInputStream(), out, 4096, true);
+    StringBuilder sb =
+        new StringBuilder("-- Network Topology -- \n");
+    sb.append(out);
+    sb.append("\n-- Network Topology -- ");
+    String topology = sb.toString();
+
+    // assert rack info
+    assertTrue(topology.contains("/ns0/rack1"));
+    assertTrue(topology.contains("/ns0/rack2"));
+    assertTrue(topology.contains("/ns0/rack3"));
+    assertTrue(topology.contains("/ns1/rack4"));
+    assertTrue(topology.contains("/ns1/rack5"));
+    assertTrue(topology.contains("/ns1/rack6"));
+
+    // assert node number
+    assertEquals(18,
+        topology.split("127.0.0.1").length - 1);
+  }
+
+  @Test
+  public void testPrintTopologyJsonFormat() throws Exception {
+    // get http address
+    String httpAddress = clusterWithDatanodes.getRandomRouter().getRouter()
+        .getHttpServerAddress().toString();
+
+    // send http request
+    URL url = new URL("http:/" + httpAddress + "/topology");
+    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+    conn.setReadTimeout(20000);
+    conn.setConnectTimeout(20000);
+    conn.setRequestProperty("Accept", "application/json");
+    conn.connect();
+
+    ByteArrayOutputStream out = new ByteArrayOutputStream();
+    IOUtils.copyBytes(conn.getInputStream(), out, 4096, true);
+    String topology = out.toString();
+
+    // parse json
+    JsonNode racks = new ObjectMapper().readTree(topology);
+
+    // assert rack number
+    assertEquals(6, racks.size());
+
+    // assert rack info
+    assertTrue(topology.contains("/ns0/rack1"));
+    assertTrue(topology.contains("/ns0/rack2"));
+    assertTrue(topology.contains("/ns0/rack3"));
+    assertTrue(topology.contains("/ns1/rack4"));
+    assertTrue(topology.contains("/ns1/rack5"));
+    assertTrue(topology.contains("/ns1/rack6"));
+
+    // assert node number
+    Iterator<JsonNode> elements = racks.elements();
+    int dataNodesCount = 0;
+    while (elements.hasNext()) {
+      JsonNode rack = elements.next();
+      Iterator<Map.Entry<String, JsonNode>> fields = rack.fields();
+      while (fields.hasNext()) {
+        dataNodesCount += fields.next().getValue().size();
+      }
+    }
+    assertEquals(18, dataNodesCount);
+  }
+
+  @Test
+  public void testPrintTopologyNoDatanodesTextFormat() throws Exception {
+    // get http address
+    String httpAddress = clusterNoDatanodes.getRandomRouter().getRouter()
+        .getHttpServerAddress().toString();
+
+    // send http request
+    URL url = new URL("http:/" + httpAddress + "/topology");
+    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+    conn.setReadTimeout(20000);
+    conn.setConnectTimeout(20000);
+    conn.connect();
+    ByteArrayOutputStream out = new ByteArrayOutputStream();
+    IOUtils.copyBytes(conn.getInputStream(), out, 4096, true);
+    StringBuilder sb =
StringBuilder("-- Network Topology -- \n"); + sb.append(out); + sb.append("\n-- Network Topology -- "); + String topology = sb.toString(); + + // assert node number + assertTrue(topology.contains("No DataNodes")); + } + + @Test + public void testPrintTopologyNoDatanodesJsonFormat() throws Exception { + // get http Address + String httpAddress = clusterNoDatanodes.getRandomRouter().getRouter() + .getHttpServerAddress().toString(); + + // send http request + URL url = new URL("http:/" + httpAddress + "/topology"); + HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + conn.setReadTimeout(20000); + conn.setConnectTimeout(20000); + conn.setRequestProperty("Accept", "application/json"); + conn.connect(); + ByteArrayOutputStream out = new ByteArrayOutputStream(); + IOUtils.copyBytes(conn.getInputStream(), out, 4096, true); + StringBuilder sb = + new StringBuilder("-- Network Topology -- \n"); + sb.append(out); + sb.append("\n-- Network Topology -- "); + String topology = sb.toString(); + + // assert node number + assertTrue(topology.contains("No DataNodes")); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java index 7ca52417d9a0a..c05398a31cec3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java @@ -253,7 +253,7 @@ private static void setupServlets(HttpServer2 httpServer) { httpServer.addInternalServlet(IsNameNodeActiveServlet.SERVLET_NAME, IsNameNodeActiveServlet.PATH_SPEC, IsNameNodeActiveServlet.class); - httpServer.addInternalServlet("topology", + httpServer.addInternalServlet(NetworkTopologyServlet.SERVLET_NAME, NetworkTopologyServlet.PATH_SPEC, NetworkTopologyServlet.class); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NetworkTopologyServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NetworkTopologyServlet.java index 5d089718ccffe..c07d596d696da 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NetworkTopologyServlet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NetworkTopologyServlet.java @@ -46,6 +46,7 @@ @InterfaceAudience.Private public class NetworkTopologyServlet extends DfsServlet { + public static final String SERVLET_NAME = "topology"; public static final String PATH_SPEC = "/topology"; protected static final String FORMAT_JSON = "json"; @@ -90,7 +91,7 @@ public void doGet(HttpServletRequest request, HttpServletResponse response) * @param leaves leaves nodes under base scope * @param format the response format */ - public void printTopology(PrintStream stream, List leaves, + protected void printTopology(PrintStream stream, List leaves, String format) throws BadFormatException, IOException { if (leaves.isEmpty()) { stream.print("No DataNodes"); @@ -120,7 +121,7 @@ public void printTopology(PrintStream stream, List leaves, } } - private void printJsonFormat(PrintStream stream, Map> tree, ArrayList racks) throws IOException { JsonFactory dumpFactory = new JsonFactory(); JsonGenerator dumpGenerator = dumpFactory.createGenerator(stream); @@ -152,7 +153,7 @@ private void printJsonFormat(PrintStream stream, 
     }
   }
 
-  private void printTextFormat(PrintStream stream, Map<String,
+  protected void printTextFormat(PrintStream stream, Map<String,
       TreeSet<String>> tree, ArrayList<String> racks) {
     for(String r : racks) {
       stream.println("Rack: " + r);
@@ -171,7 +172,7 @@ private void printTextFormat(PrintStream stream, Map<String,
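
The visibility changes above (printTopology, printJsonFormat, and printTextFormat relaxed to protected) are what let RouterNetworkTopologyServlet reuse the NameNode's rendering unchanged. Judging from the assertions in TestRouterNetworkTopologyServlet, the rendered output has roughly the following shape (an illustrative sketch inferred from the tests, not captured output; node entries are elided):

    Text format:
      Rack: /ns0/rack1
         <one indented line per DataNode on the rack>
      ...
      (or the literal string "No DataNodes" when the datanode report is empty)

    JSON format (one object per rack, mapping the rack path to its nodes):
      [ { "/ns0/rack1" : [ <node>, <node>, <node> ] },
        { "/ns0/rack2" : [ ... ] },
        ... ]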