diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index bdd20d7e27634..40a6b7e1f1b46 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -144,10 +144,10 @@ public class DatanodeManager {
   private volatile boolean avoidSlowDataNodesForRead;
 
   /** Whether or not to consider lad for reading. */
-  private final boolean readConsiderLoad;
+  private volatile boolean readConsiderLoad;
 
   /** Whether or not to consider storageType for reading. */
-  private final boolean readConsiderStorageType;
+  private volatile boolean readConsiderStorageType;
 
   /**
    * Whether or not to avoid using stale DataNodes for writing.
@@ -2288,4 +2288,22 @@ public void setMaxSlowPeersToReport(int maxSlowPeersToReport) {
   public boolean isSlowPeerCollectorInitialized() {
     return slowPeerCollectorDaemon == null;
   }
+
+  public void setReadConsiderLoad(boolean enable) {
+    this.readConsiderLoad = enable;
+  }
+
+  public void setReadConsiderStorageType(boolean enable) {
+    this.readConsiderStorageType = enable;
+  }
+
+  @VisibleForTesting
+  public boolean isReadConsiderLoad() {
+    return readConsiderLoad;
+  }
+
+  @VisibleForTesting
+  public boolean isReadConsiderStorageType() {
+    return readConsiderStorageType;
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 4aa81152fa9ee..5a2ae8823fe73 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -135,6 +135,10 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NN_NOT_BECOME_ACTIVE_IN_SAFEMODE_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_IMAGE_PARALLEL_LOAD_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_IMAGE_PARALLEL_LOAD_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_CONSIDERLOAD_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_CONSIDERLOAD_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_CONSIDERSTORAGETYPE_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_CONSIDERSTORAGETYPE_KEY;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_CALLER_CONTEXT_ENABLED_KEY;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_CALLER_CONTEXT_ENABLED_DEFAULT;
@@ -360,7 +364,9 @@ public enum OperationCategory {
       DFS_DATANODE_MAX_NODES_TO_REPORT_KEY,
       DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY,
       DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT,
-      DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK));
+      DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK,
+      DFS_NAMENODE_READ_CONSIDERLOAD_KEY,
+      DFS_NAMENODE_READ_CONSIDERSTORAGETYPE_KEY));
 
   private static final String USAGE = "Usage: hdfs namenode ["
       + StartupOption.BACKUP.getName() + "] | \n\t["
@@ -2354,6 +2360,10 @@ protected String reconfigurePropertyImpl(String property, String newVal)
         (property.equals(DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK))) {
       return reconfigureDecommissionBackoffMonitorParameters(datanodeManager, property,
           newVal);
+    } else if (property.equals(DFS_NAMENODE_READ_CONSIDERLOAD_KEY)
+        || property.equals(DFS_NAMENODE_READ_CONSIDERSTORAGETYPE_KEY)) {
+      return reconfigureReadStrategyParameters(datanodeManager, property,
+          newVal);
     } else {
       throw new ReconfigurationException(property, newVal, getConf().get(
           property));
@@ -2663,6 +2673,52 @@ private String reconfigureDecommissionBackoffMonitorParameters(
     }
   }
 
+  private String reconfigureReadStrategyParameters(
+      final DatanodeManager datanodeManager, final String property,
+      final String newVal) throws ReconfigurationException {
+    namesystem.writeLock();
+    String result;
+    try {
+      switch (property) {
+      case DFS_NAMENODE_READ_CONSIDERLOAD_KEY: {
+        if (newVal != null && !newVal.equalsIgnoreCase("true")
+            && !newVal.equalsIgnoreCase("false")) {
+          throw new IllegalArgumentException(newVal + " is not boolean value");
+        }
+        boolean enable = (newVal == null ?
+            DFS_NAMENODE_READ_CONSIDERLOAD_DEFAULT :
+            Boolean.parseBoolean(newVal));
+        result = Boolean.toString(enable);
+        datanodeManager.setReadConsiderLoad(enable);
+        break;
+      }
+      case DFS_NAMENODE_READ_CONSIDERSTORAGETYPE_KEY: {
+        if (newVal != null && !newVal.equalsIgnoreCase("true")
+            && !newVal.equalsIgnoreCase("false")) {
+          throw new IllegalArgumentException(newVal + " is not boolean value");
+        }
+        boolean enable = (newVal == null ?
+            DFS_NAMENODE_READ_CONSIDERSTORAGETYPE_DEFAULT :
+            Boolean.parseBoolean(newVal));
+        result = Boolean.toString(enable);
+        datanodeManager.setReadConsiderStorageType(enable);
+        break;
+      }
+      default: {
+        throw new IllegalArgumentException("Unexpected property " + property
+            + " in reconfigureReadStrategyParameters");
+      }
+      }
+      LOG.info("RECONFIGURE* changed {} to {}", property, newVal);
+      return result;
+    } catch (IllegalArgumentException e) {
+      throw new ReconfigurationException(property, newVal, getConf().get(
+          property), e);
+    } finally {
+      namesystem.writeUnlock("reconfigureReadStrategyParameters");
+    }
+  }
+
   @Override // ReconfigurableBase
   protected Configuration getNewConf() {
     return new HdfsConfiguration();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
index ec7717e503a6b..ae89981c6aa8b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
@@ -30,6 +30,8 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MAX_NODES_TO_REPORT_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_IMAGE_PARALLEL_LOAD_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_CONSIDERLOAD_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_CONSIDERSTORAGETYPE_KEY;
 import static org.junit.Assert.*;
 
 import org.slf4j.Logger;
@@ -654,6 +656,43 @@ public void testReconfigureDecommissionBackoffMonitorParameters()
     }
   }
 
+  @Test
+  public void testReconfigureReadStrategyParameters() throws Exception {
+    final NameNode nameNode = cluster.getNameNode();
+    final DatanodeManager datanodeManager =
+        nameNode.namesystem.getBlockManager().getDatanodeManager();
+    Configuration conf = nameNode.getConf();
+    // expect the default value to be false
+    assertFalse(conf.getBoolean(DFS_NAMENODE_READ_CONSIDERLOAD_KEY,
+        true));
+    assertFalse(conf.getBoolean(DFS_NAMENODE_READ_CONSIDERSTORAGETYPE_KEY,
+        true));
+
+    try {
+      nameNode.reconfigurePropertyImpl(DFS_NAMENODE_READ_CONSIDERLOAD_KEY,
+          "non-boolean");
+      fail("should not reach here");
+    } catch (ReconfigurationException e) {
+      assertEquals(
+          "Could not change property dfs.namenode.read.considerLoad "
+              + "from 'false' to 'non-boolean'",
+          e.getMessage());
+    }
+
+
+    nameNode.reconfigurePropertyImpl(DFS_NAMENODE_READ_CONSIDERLOAD_KEY,
+        "true");
+    nameNode.reconfigurePropertyImpl(DFS_NAMENODE_READ_CONSIDERSTORAGETYPE_KEY,
+        "TRUE");
+    assertTrue(datanodeManager.isReadConsiderLoad());
+    assertTrue(datanodeManager.isReadConsiderStorageType());
+
+    nameNode.reconfigurePropertyImpl(DFS_NAMENODE_READ_CONSIDERSTORAGETYPE_KEY,
+        null);
+    assertFalse(datanodeManager.isReadConsiderStorageType());
+  }
+
   @After
   public void shutDown() throws IOException {
     if (cluster != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
index d7fee5f1f809e..0ff9349562ce9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ -99,6 +99,8 @@
 import org.slf4j.LoggerFactory;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_CONSIDERLOAD_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_CONSIDERSTORAGETYPE_KEY;
 import static org.apache.hadoop.hdfs.client.HdfsAdmin.TRASH_PERMISSION;
 import static org.hamcrest.CoreMatchers.allOf;
 import static org.hamcrest.CoreMatchers.anyOf;
@@ -441,7 +443,7 @@ public void testNameNodeGetReconfigurableProperties() throws IOException, Interr
     final List<String> outs = Lists.newArrayList();
     final List<String> errs = Lists.newArrayList();
     getReconfigurableProperties("namenode", address, outs, errs);
-    assertEquals(22, outs.size());
+    assertEquals(24, outs.size());
     assertTrue(outs.get(0).contains("Reconfigurable properties:"));
     assertEquals(DFS_BLOCK_INVALIDATE_LIMIT_KEY, outs.get(1));
     assertEquals(DFS_BLOCK_PLACEMENT_EC_CLASSNAME_KEY, outs.get(2));
@@ -456,6 +458,8 @@ public void testNameNodeGetReconfigurableProperties() throws IOException, Interr
     assertEquals(DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT, outs.get(11));
     assertEquals(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, outs.get(12));
     assertEquals(DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY, outs.get(13));
+    assertEquals(DFS_NAMENODE_READ_CONSIDERLOAD_KEY, outs.get(14));
+    assertEquals(DFS_NAMENODE_READ_CONSIDERSTORAGETYPE_KEY, outs.get(15));
     assertEquals(errs.size(), 0);
   }
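
Usage note (not part of the patch): because both keys are registered in RECONFIGURABLE_PROPERTIES above, an operator changes them the same way as the other NameNode knobs: edit hdfs-site.xml on the NameNode, then drive the reconfiguration protocol. The reload works because NameNode#getNewConf() (context lines above) returns a fresh HdfsConfiguration. Below is a minimal Java sketch of that flow, using only the public DFSAdmin/ToolRunner APIs that the `hdfs dfsadmin -reconfig` command itself wraps; the class name and the NameNode address are placeholders.

import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.util.ToolRunner;

public class ReadStrategyReconfigSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder: the NameNode's RPC address (dfs.namenode.rpc-address).
    final String nn = "nn1.example.com:8020";
    final DFSAdmin admin = new DFSAdmin(new HdfsConfiguration());

    // Step 1: after editing dfs.namenode.read.considerLoad and/or
    // dfs.namenode.read.considerStorageType in the NameNode's hdfs-site.xml,
    // ask the NameNode to start an asynchronous reconfiguration pass.
    ToolRunner.run(admin, new String[] {"-reconfig", "namenode", nn, "start"});

    // Step 2: poll until the background task reports completion.
    ToolRunner.run(admin, new String[] {"-reconfig", "namenode", nn, "status"});

    // Step 3: list the reconfigurable properties; with this patch the two
    // read.consider* keys appear (cf. the TestDFSAdmin change: 24 entries).
    ToolRunner.run(admin, new String[] {"-reconfig", "namenode", nn, "properties"});
  }
}

Equivalently from a shell: hdfs dfsadmin -reconfig namenode nn1.example.com:8020 start, then status/properties with the same arguments.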