From c0639d4de195c9c01e2683dc205819e69bfc451a Mon Sep 17 00:00:00 2001
From: tomscut
Date: Mon, 12 Apr 2021 21:09:22 +0800
Subject: [PATCH 1/5] HDFS-15970. Print network topology on web

---
 .../server/namenode/NameNodeHttpServer.java   |   6 +-
 .../namenode/NetworkTopologyServlet.java      | 115 ++++++++++++++++++
 .../src/main/webapps/hdfs/dfshealth.html      |   1 +
 .../src/main/webapps/hdfs/explorer.html      |   1 +
 .../namenode/TestNetworkTopologyServlet.java  | 115 ++++++++++++++++++
 5 files changed, 236 insertions(+), 2 deletions(-)
 create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NetworkTopologyServlet.java
 create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNetworkTopologyServlet.java

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
index 33913227af2ce..7ca52417d9a0a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
@@ -166,7 +166,7 @@ void start() throws IOException {
     httpServer.setAttribute(NAMENODE_ATTRIBUTE_KEY, nn);
     httpServer.setAttribute(JspHelper.CURRENT_CONF, conf);
-    setupServlets(httpServer, conf);
+    setupServlets(httpServer);
     httpServer.start();
 
     int connIdx = 0;
@@ -243,7 +243,7 @@ void setAliasMap(InMemoryAliasMap aliasMap) {
     httpServer.setAttribute(ALIASMAP_ATTRIBUTE_KEY, aliasMap);
   }
 
-  private static void setupServlets(HttpServer2 httpServer, Configuration conf) {
+  private static void setupServlets(HttpServer2 httpServer) {
     httpServer.addInternalServlet("startupProgress",
         StartupProgressServlet.PATH_SPEC, StartupProgressServlet.class);
     httpServer.addInternalServlet("fsck", "/fsck", FsckServlet.class,
@@ -253,6 +253,8 @@ private static void setupServlets(HttpServer2 httpServer, Configuration conf) {
     httpServer.addInternalServlet(IsNameNodeActiveServlet.SERVLET_NAME,
         IsNameNodeActiveServlet.PATH_SPEC,
         IsNameNodeActiveServlet.class);
+    httpServer.addInternalServlet("topology",
+        NetworkTopologyServlet.PATH_SPEC, NetworkTopologyServlet.class);
   }
 
   static FSImage getFsImageFromContext(ServletContext context) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NetworkTopologyServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NetworkTopologyServlet.java
new file mode 100644
index 0000000000000..4b28c3076448c
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NetworkTopologyServlet.java
@@ -0,0 +1,115 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.net.Node;
+import org.apache.hadoop.net.NodeBase;
+import org.apache.hadoop.util.StringUtils;
+
+import javax.servlet.ServletContext;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.TreeSet;
+
+/**
+ * A servlet to print out the network topology.
+ */
+@InterfaceAudience.Private
+public class NetworkTopologyServlet extends DfsServlet {
+
+  public static final String PATH_SPEC = "/topology";
+
+  @Override
+  public void doGet(HttpServletRequest request, HttpServletResponse response)
+      throws IOException {
+    final ServletContext context = getServletContext();
+    NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
+    BlockManager bm = nn.getNamesystem().getBlockManager();
+    List<Node> leaves = bm.getDatanodeManager().getNetworkTopology()
+        .getLeaves(NodeBase.ROOT);
+
+    response.setContentType("text/plain; charset=UTF-8");
+    try (PrintStream out = new PrintStream(
+        response.getOutputStream(), false, "UTF-8")) {
+      printTopology(out, leaves);
+    } catch (Throwable t) {
+      String errMsg = "Print network topology failed. "
+          + StringUtils.stringifyException(t);
+      response.sendError(HttpServletResponse.SC_GONE, errMsg);
+      throw new IOException(errMsg);
+    } finally {
+      response.getOutputStream().close();
+    }
+  }
+
+  /**
+   * Display each rack and the nodes assigned to that rack, as determined
+   * by the NameNode, in a hierarchical manner. The nodes and racks are
+   * sorted alphabetically.
+   *
+   * @param stream print stream
+   * @param leaves leaves nodes under base scope
+   */
+  public void printTopology(PrintStream stream, List<Node> leaves) {
+    if (leaves.size() == 0) {
+      stream.print("No DataNodes");
+      return;
+    }
+
+    // Build a map of rack -> nodes from the datanode report
+    HashMap<String, TreeSet<String>> tree = new HashMap<String, TreeSet<String>>();
+    for(Node dni : leaves) {
+      String location = dni.getNetworkLocation();
+      String name = dni.getName();
+
+      if(!tree.containsKey(location)) {
+        tree.put(location, new TreeSet<String>());
+      }
+
+      tree.get(location).add(name);
+    }
+
+    // Sort the racks (and nodes) alphabetically, display in order
+    ArrayList<String> racks = new ArrayList<String>(tree.keySet());
+    Collections.sort(racks);
+
+    for(String r : racks) {
+      stream.println("Rack: " + r);
+      TreeSet<String> nodes = tree.get(r);
+
+      for(String n : nodes) {
+        stream.print(" " + n);
+        String hostname = NetUtils.getHostNameOfIP(n);
+        if(hostname != null) {
+          stream.print(" (" + hostname + ")");
+        }
+        stream.println();
+      }
+      stream.println();
+    }
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index 6e4eade9566d7..8622e4d3a5681 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -52,6 +52,7 @@
  • Metrics
  • Configuration
  • Process Thread Dump
+  • Network Topology
   • 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
index 73bfbd4527f48..3f0509a229700 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
@@ -48,6 +48,7 @@
  • Metrics
  • Configuration
  • Process Thread Dump
+  • Network Topology
   • 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNetworkTopologyServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNetworkTopologyServlet.java
new file mode 100644
index 0000000000000..171536b732273
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNetworkTopologyServlet.java
@@ -0,0 +1,115 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.net.StaticMapping;
+import org.junit.Test;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.net.HttpURLConnection;
+import java.net.URL;
+import java.util.ArrayList;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+public class TestNetworkTopologyServlet {
+
+  @Test
+  public void testPrintTopology() throws IOException {
+    StaticMapping.resetMap();
+    Configuration conf = new HdfsConfiguration();
+    int dataNodesNum = 0;
+    final ArrayList<String> rackList = new ArrayList<String>();
+    for (int i = 0; i < 5; i++) {
+      for (int j = 0; j < 2; j++) {
+        rackList.add("/rack" + i);
+        dataNodesNum++;
+      }
+    }
+
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(dataNodesNum)
+        .racks(rackList.toArray(new String[rackList.size()]))
+        .build();
+    cluster.waitActive();
+
+    // get http uri
+    String httpUri = cluster.getHttpUri(0);
+
+    // send http request
+    URL url = new URL(httpUri + "/topology");
+    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+    conn.setReadTimeout(20000);
+    conn.setConnectTimeout(20000);
+    conn.connect();
+    ByteArrayOutputStream out = new ByteArrayOutputStream();
+    IOUtils.copyBytes(conn.getInputStream(), out, 4096, true);
+    StringBuilder sb =
+        new StringBuilder("-- Network Topology -- \n");
+    sb.append(out);
+    sb.append("\n-- Network Topology -- ");
+    String topology = sb.toString();
+
+    // assert rack info
+    assertTrue(topology.contains("/rack0"));
+    assertTrue(topology.contains("/rack1"));
+    assertTrue(topology.contains("/rack2"));
+    assertTrue(topology.contains("/rack3"));
+    assertTrue(topology.contains("/rack4"));
+
+    // assert node number
+    assertEquals(topology.split("127.0.0.1").length - 1,
+        dataNodesNum);
+  }
+
+  @Test
+  public void testPrintTopologyNoDatanodes() throws IOException {
+    StaticMapping.resetMap();
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(0)
+        .build();
+    cluster.waitActive();
+
+    // get http uri
+    String httpUri = cluster.getHttpUri(0);
+
+    // send http request
+    URL url = new URL(httpUri + "/topology");
+    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+    conn.setReadTimeout(20000);
+    conn.setConnectTimeout(20000);
+    conn.connect();
+    ByteArrayOutputStream out = new ByteArrayOutputStream();
+    IOUtils.copyBytes(conn.getInputStream(), out, 4096, true);
+    StringBuilder sb =
+        new StringBuilder("-- Network Topology -- \n");
+    sb.append(out);
+    sb.append("\n-- Network Topology -- ");
+    String topology = sb.toString();
+
+    // assert node number
+    assertTrue(topology.contains("No DataNodes"));
+  }
+}

From 30daa9a064af200ebd3e79df6893273b517cb308 Mon Sep 17 00:00:00 2001
From: tomscut
Date: Tue, 13 Apr 2021 09:19:47 +0800
Subject: [PATCH 2/5] fix checkstyle

---
 .../hadoop/hdfs/server/namenode/NetworkTopologyServlet.java | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NetworkTopologyServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NetworkTopologyServlet.java
index 4b28c3076448c..5f41aadc2d446 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NetworkTopologyServlet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NetworkTopologyServlet.java
@@ -81,7 +81,8 @@ public void printTopology(PrintStream stream, List<Node> leaves) {
     }
 
     // Build a map of rack -> nodes from the datanode report
-    HashMap<String, TreeSet<String>> tree = new HashMap<String, TreeSet<String>>();
+    HashMap<String, TreeSet<String>> tree =
+        new HashMap<String, TreeSet<String>>();
     for(Node dni : leaves) {
       String location = dni.getNetworkLocation();
       String name = dni.getName();

From 4934adbcd5d1a616e670348a07c4b4ac9f1d15ee Mon Sep 17 00:00:00 2001
From: tomscut
Date: Tue, 13 Apr 2021 10:42:12 +0800
Subject: [PATCH 3/5] update comments

---
 .../hadoop/hdfs/server/namenode/NetworkTopologyServlet.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NetworkTopologyServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NetworkTopologyServlet.java
index 5f41aadc2d446..bdac1d1cc002d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NetworkTopologyServlet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NetworkTopologyServlet.java
@@ -80,7 +80,7 @@ public void printTopology(PrintStream stream, List<Node> leaves) {
       return;
     }
 
-    // Build a map of rack -> nodes from the datanode report
+    // Build a map of rack -> nodes
     HashMap<String, TreeSet<String>> tree =
         new HashMap<String, TreeSet<String>>();
     for(Node dni : leaves) {

From 6a97f6a99f2809aed0f352446b184c6a33445731 Mon Sep 17 00:00:00 2001
From: tomscut
Date: Tue, 13 Apr 2021 12:16:33 +0800
Subject: [PATCH 4/5] optimize the code

---
 .../server/namenode/NetworkTopologyServlet.java | 13 +++++--------
 1 file changed, 5 insertions(+), 8 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NetworkTopologyServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NetworkTopologyServlet.java
index bdac1d1cc002d..9b8d3fa36bedf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NetworkTopologyServlet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NetworkTopologyServlet.java
@@ -33,6 +33,7 @@
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.TreeSet;
 
 /**
@@ -75,27 +76,23 @@ public void doGet(HttpServletRequest request, HttpServletResponse response)
    * @param leaves leaves nodes under base scope
    */
   public void printTopology(PrintStream stream, List<Node> leaves) {
-    if (leaves.size() == 0) {
+    if (leaves.isEmpty()) {
       stream.print("No DataNodes");
       return;
     }
 
     // Build a map of rack -> nodes
-    HashMap<String, TreeSet<String>> tree =
-        new HashMap<String, TreeSet<String>>();
+    Map<String, TreeSet<String>> tree = new HashMap<>();
     for(Node dni : leaves) {
       String location = dni.getNetworkLocation();
       String name = dni.getName();
 
-      if(!tree.containsKey(location)) {
-        tree.put(location, new TreeSet<String>());
-      }
-
+      tree.putIfAbsent(location, new TreeSet<>());
       tree.get(location).add(name);
     }
 
     // Sort the racks (and nodes) alphabetically, display in order
-    ArrayList<String> racks = new ArrayList<String>(tree.keySet());
+    ArrayList<String> racks = new ArrayList<>(tree.keySet());
     Collections.sort(racks);
 
     for(String r : racks) {

From 4fc4bfa78aa50d0735bd6a02199707afeda6cccb Mon Sep 17 00:00:00 2001
From: tomscut
Date: Sat, 17 Apr 2021 00:39:46 +0800
Subject: [PATCH 5/5] support json format

---
 .../namenode/NetworkTopologyServlet.java      | 80 +++++++++++++++-
 .../namenode/TestNetworkTopologyServlet.java  | 91 ++++++++++++++++++-
 2 files changed, 166 insertions(+), 5 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NetworkTopologyServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NetworkTopologyServlet.java
index 9b8d3fa36bedf..5d089718ccffe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NetworkTopologyServlet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NetworkTopologyServlet.java
@@ -17,16 +17,20 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.core.JsonGenerator;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.net.NodeBase;
+import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.util.StringUtils;
 
 import javax.servlet.ServletContext;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
+import javax.ws.rs.core.HttpHeaders;
 import java.io.IOException;
 import java.io.PrintStream;
 import java.util.ArrayList;
@@ -44,19 +48,29 @@ public class NetworkTopologyServlet extends DfsServlet {
 
   public static final String PATH_SPEC = "/topology";
 
+  protected static final String FORMAT_JSON = "json";
+  protected static final String FORMAT_TEXT = "text";
+
   @Override
   public void doGet(HttpServletRequest request, HttpServletResponse response)
       throws IOException {
     final ServletContext context = getServletContext();
+
+    String format = parseAcceptHeader(request);
+    if (FORMAT_TEXT.equals(format)) {
+      response.setContentType("text/plain; charset=UTF-8");
+    } else if (FORMAT_JSON.equals(format)) {
+      response.setContentType("application/json; charset=UTF-8");
+    }
+
     NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
     BlockManager bm = nn.getNamesystem().getBlockManager();
     List<Node> leaves = bm.getDatanodeManager().getNetworkTopology()
         .getLeaves(NodeBase.ROOT);
 
-    response.setContentType("text/plain; charset=UTF-8");
     try (PrintStream out = new PrintStream(
         response.getOutputStream(), false, "UTF-8")) {
-      printTopology(out, leaves);
+      printTopology(out, leaves, format);
     } catch (Throwable t) {
       String errMsg = "Print network topology failed. "
           + StringUtils.stringifyException(t);
@@ -74,8 +88,10 @@ public void doGet(HttpServletRequest request, HttpServletResponse response)
    *
    * @param stream print stream
    * @param leaves leaves nodes under base scope
+   * @param format the response format
    */
-  public void printTopology(PrintStream stream, List<Node> leaves) {
+  public void printTopology(PrintStream stream, List<Node> leaves,
+      String format) throws BadFormatException, IOException {
     if (leaves.isEmpty()) {
       stream.print("No DataNodes");
       return;
@@ -95,6 +111,49 @@
     ArrayList<String> racks = new ArrayList<>(tree.keySet());
     Collections.sort(racks);
 
+    if (FORMAT_JSON.equals(format)) {
+      printJsonFormat(stream, tree, racks);
+    } else if (FORMAT_TEXT.equals(format)) {
+      printTextFormat(stream, tree, racks);
+    } else {
+      throw new BadFormatException("Bad format: " + format);
+    }
+  }
+
+  private void printJsonFormat(PrintStream stream,
+      Map<String, TreeSet<String>> tree, ArrayList<String> racks)
+      throws IOException {
+    JsonFactory dumpFactory = new JsonFactory();
+    JsonGenerator dumpGenerator = dumpFactory.createGenerator(stream);
+    dumpGenerator.writeStartArray();
+
+    for(String r : racks) {
+      dumpGenerator.writeStartObject();
+      dumpGenerator.writeFieldName(r);
+      TreeSet<String> nodes = tree.get(r);
+      dumpGenerator.writeStartArray();
+
+      for(String n : nodes) {
+        dumpGenerator.writeStartObject();
+        dumpGenerator.writeStringField("ip", n);
+        String hostname = NetUtils.getHostNameOfIP(n);
+        if(hostname != null) {
+          dumpGenerator.writeStringField("hostname", hostname);
+        }
+        dumpGenerator.writeEndObject();
+      }
+      dumpGenerator.writeEndArray();
+      dumpGenerator.writeEndObject();
+    }
+    dumpGenerator.writeEndArray();
+    dumpGenerator.flush();
+
+    if (!dumpGenerator.isClosed()) {
+      dumpGenerator.close();
+    }
+  }
+
+  private void printTextFormat(PrintStream stream,
+      Map<String, TreeSet<String>> tree, ArrayList<String> racks) {
     for(String r : racks) {
       stream.println("Rack: " + r);
       TreeSet<String> nodes = tree.get(r);
@@ -110,4 +169,19 @@
       stream.println();
     }
   }
+
+  @VisibleForTesting
+  static String parseAcceptHeader(HttpServletRequest request) {
+    String format = request.getHeader(HttpHeaders.ACCEPT);
+    return format != null && format.contains(FORMAT_JSON) ?
+        FORMAT_JSON : FORMAT_TEXT;
+  }
+
+  public static class BadFormatException extends Exception {
+    private static final long serialVersionUID = 1L;
+
+    public BadFormatException(String msg) {
+      super(msg);
+    }
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNetworkTopologyServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNetworkTopologyServlet.java
index 171536b732273..7796ed4182ee6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNetworkTopologyServlet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNetworkTopologyServlet.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -29,6 +31,8 @@
 import java.net.HttpURLConnection;
 import java.net.URL;
 import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.Map;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
@@ -36,7 +40,7 @@ public class TestNetworkTopologyServlet {
 
   @Test
-  public void testPrintTopology() throws IOException {
+  public void testPrintTopologyTextFormat() throws IOException {
     StaticMapping.resetMap();
     Configuration conf = new HdfsConfiguration();
     int dataNodesNum = 0;
@@ -84,7 +88,59 @@ public void testPrintTopology() throws IOException {
   }
 
   @Test
-  public void testPrintTopologyNoDatanodes() throws IOException {
+  public void testPrintTopologyJsonFormat() throws IOException {
+    StaticMapping.resetMap();
+    Configuration conf = new HdfsConfiguration();
+    int dataNodesNum = 0;
+    final ArrayList<String> rackList = new ArrayList<String>();
+    for (int i = 0; i < 5; i++) {
+      for (int j = 0; j < 2; j++) {
+        rackList.add("/rack" + i);
+        dataNodesNum++;
+      }
+    }
+
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(dataNodesNum)
+        .racks(rackList.toArray(new String[rackList.size()]))
+        .build();
+    cluster.waitActive();
+
+    // get http uri
+    String httpUri = cluster.getHttpUri(0);
+
+    // send http request
+    URL url = new URL(httpUri + "/topology");
+    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+    conn.setReadTimeout(20000);
+    conn.setConnectTimeout(20000);
+    conn.setRequestProperty("Accept", "application/json");
+    conn.connect();
+    ByteArrayOutputStream out = new ByteArrayOutputStream();
+    IOUtils.copyBytes(conn.getInputStream(), out, 4096, true);
+    String topology = out.toString();
+
+    // parse json
+    JsonNode racks = new ObjectMapper().readTree(topology);
+
+    // assert rack number
+    assertEquals(racks.size(), 5);
+
+    // assert node number
+    Iterator<JsonNode> elements = racks.elements();
+    int dataNodesCount = 0;
+    while(elements.hasNext()){
+      JsonNode rack = elements.next();
+      Iterator<Map.Entry<String, JsonNode>> fields = rack.fields();
+      while (fields.hasNext()) {
+        dataNodesCount += fields.next().getValue().size();
+      }
+    }
+    assertEquals(dataNodesCount, dataNodesNum);
+  }
+
+  @Test
+  public void testPrintTopologyNoDatanodesTextFormat() throws IOException {
     StaticMapping.resetMap();
     Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
         .numDataNodes(0)
         .build();
@@ -112,4 +168,35 @@ public void testPrintTopologyNoDatanodes() throws IOException {
     // assert node number
     assertTrue(topology.contains("No DataNodes"));
   }
+
+  @Test
+  public void testPrintTopologyNoDatanodesJsonFormat() throws IOException {
+    StaticMapping.resetMap();
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(0)
+        .build();
+    cluster.waitActive();
+
+    // get http uri
+    String httpUri = cluster.getHttpUri(0);
+
+    // send http request
+    URL url = new URL(httpUri + "/topology");
+    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+    conn.setReadTimeout(20000);
+    conn.setConnectTimeout(20000);
+    conn.setRequestProperty("Accept", "application/json");
+    conn.connect();
+    ByteArrayOutputStream out = new ByteArrayOutputStream();
+    IOUtils.copyBytes(conn.getInputStream(), out, 4096, true);
+    StringBuilder sb =
+        new StringBuilder("-- Network Topology -- \n");
+    sb.append(out);
+    sb.append("\n-- Network Topology -- ");
+    String topology = sb.toString();
+
+    // assert node number
+    assertTrue(topology.contains("No DataNodes"));
+  }
 }
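
Usage note (editor addition, not part of the patch series): with these patches
applied, the servlet can be exercised directly against a running NameNode. A
minimal client sketch follows; the address localhost:9870 (the default NameNode
HTTP port) and the sample output below are illustrative assumptions, not output
captured from a real cluster.

    import java.io.InputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class TopologyClient {
      public static void main(String[] args) throws Exception {
        // Assumed NameNode web address; adjust host/port for your cluster.
        URL url = new URL("http://localhost:9870/topology");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        // parseAcceptHeader() only checks whether "json" occurs in the
        // Accept header; any other value falls back to the text format.
        conn.setRequestProperty("Accept", "application/json");
        conn.connect();
        try (InputStream in = conn.getInputStream()) {
          byte[] buf = new byte[4096];
          int n;
          while ((n = in.read(buf)) != -1) {
            System.out.write(buf, 0, n);
          }
          System.out.flush();
        }
      }
    }

Per printJsonFormat(), the JSON body is an array with one object per rack, each
mapping the rack path to an array of {ip, hostname} objects, e.g.
[{"/rack0":[{"ip":"127.0.0.1:9866","hostname":"localhost"}]}] (the port here is
made up). The text format, per printTextFormat(), prints a "Rack: /rack0"
header followed by one "ip (hostname)" line per node and a blank line between
racks, or "No DataNodes" when the topology has no leaves.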