@@ -144,10 +144,10 @@ public class DatanodeManager {
private volatile boolean avoidSlowDataNodesForRead;

/** Whether or not to consider load for reading. */
private final boolean readConsiderLoad;
private volatile boolean readConsiderLoad;

/** Whether or not to consider storageType for reading. */
private final boolean readConsiderStorageType;
private volatile boolean readConsiderStorageType;

/**
* Whether or not to avoid using stale DataNodes for writing.
@@ -2288,4 +2288,22 @@ public void setMaxSlowPeersToReport(int maxSlowPeersToReport) {
public boolean isSlowPeerCollectorInitialized() {
return slowPeerCollectorDaemon == null;
}

public void setReadConsiderLoad(boolean enable) {
this.readConsiderLoad = enable;
}

public void setReadConsiderStorageType(boolean enable) {
this.readConsiderStorageType = enable;
}

@VisibleForTesting
public boolean isReadConsiderLoad() {
return readConsiderLoad;
}

@VisibleForTesting
public boolean isReadConsiderStorageType() {
return readConsiderStorageType;
}
}
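Dropping `final` in favor of `volatile` is what makes the two flags safely updatable after the DatanodeManager has been constructed: the reconfiguration handler writes them (under the namesystem write lock, see the NameNode change below) while RPC handler threads serving reads observe the new value without additional locking. A minimal, self-contained sketch of that visibility pattern (not from the patch; the class name is invented for illustration):

```java
// Sketch only: illustrates why the flags need to be volatile once they are
// mutable. Not part of the patch; names are hypothetical.
public class ReadStrategyFlagsSketch {
  // Without 'volatile', a reader thread could keep seeing a stale value
  // after the reconfiguration thread flips the flag.
  private volatile boolean readConsiderLoad = false;

  void setReadConsiderLoad(boolean enable) {   // writer: reconfiguration path
    this.readConsiderLoad = enable;
  }

  boolean isReadConsiderLoad() {               // readers: block-location sorting path
    return readConsiderLoad;
  }

  public static void main(String[] args) throws Exception {
    ReadStrategyFlagsSketch flags = new ReadStrategyFlagsSketch();
    Thread reader = new Thread(() -> {
      while (!flags.isReadConsiderLoad()) {
        // busy-wait; terminates promptly because the write is volatile
      }
      System.out.println("observed readConsiderLoad = true");
    });
    reader.start();
    Thread.sleep(100);                         // let the reader start polling
    flags.setReadConsiderLoad(true);           // simulated reconfiguration
    reader.join();
  }
}
```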
@@ -135,6 +135,10 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NN_NOT_BECOME_ACTIVE_IN_SAFEMODE_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_IMAGE_PARALLEL_LOAD_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_IMAGE_PARALLEL_LOAD_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_CONSIDERLOAD_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_CONSIDERLOAD_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_CONSIDERSTORAGETYPE_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_CONSIDERSTORAGETYPE_KEY;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_CALLER_CONTEXT_ENABLED_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_CALLER_CONTEXT_ENABLED_DEFAULT;
@@ -360,7 +364,9 @@ public enum OperationCategory {
DFS_DATANODE_MAX_NODES_TO_REPORT_KEY,
DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY,
DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT,
DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK));
DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK,
DFS_NAMENODE_READ_CONSIDERLOAD_KEY,
DFS_NAMENODE_READ_CONSIDERSTORAGETYPE_KEY));

private static final String USAGE = "Usage: hdfs namenode ["
+ StartupOption.BACKUP.getName() + "] | \n\t["
@@ -2354,6 +2360,10 @@ protected String reconfigurePropertyImpl(String property, String newVal)
(property.equals(DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK))) {
return reconfigureDecommissionBackoffMonitorParameters(datanodeManager, property,
newVal);
} else if (property.equals(DFS_NAMENODE_READ_CONSIDERLOAD_KEY)
|| property.equals(DFS_NAMENODE_READ_CONSIDERSTORAGETYPE_KEY)) {
return reconfigureReadStrategyParameters(datanodeManager, property,
newVal);
} else {
throw new ReconfigurationException(property, newVal, getConf().get(
property));
@@ -2663,6 +2673,52 @@ private String reconfigureDecommissionBackoffMonitorParameters(
}
}

private String reconfigureReadStrategyParameters(
final DatanodeManager datanodeManager, final String property,
final String newVal) throws ReconfigurationException {
namesystem.writeLock();
String result;
try {
switch (property) {
case DFS_NAMENODE_READ_CONSIDERLOAD_KEY: {
if (newVal != null && !newVal.equalsIgnoreCase("true")
&& !newVal.equalsIgnoreCase("false")) {
throw new IllegalArgumentException(newVal + " is not boolean value");
}
boolean enable = (newVal == null ?
DFS_NAMENODE_READ_CONSIDERLOAD_DEFAULT :
Boolean.parseBoolean(newVal));
result = Boolean.toString(enable);
datanodeManager.setReadConsiderLoad(enable);
break;
}
case DFS_NAMENODE_READ_CONSIDERSTORAGETYPE_KEY: {
if (newVal != null && !newVal.equalsIgnoreCase("true")
&& !newVal.equalsIgnoreCase("false")) {
throw new IllegalArgumentException(newVal + " is not boolean value");
}
boolean enable = (newVal == null ?
DFS_NAMENODE_READ_CONSIDERSTORAGETYPE_DEFAULT :
Boolean.parseBoolean(newVal));
result = Boolean.toString(enable);
datanodeManager.setReadConsiderStorageType(enable);
break;
}
default: {
throw new IllegalArgumentException("Unexpected property " + property
+ " in reconfigureReadStrategyParameters");
}
}
LOG.info("RECONFIGURE* changed {} to {}", property, newVal);
return result;
} catch (IllegalArgumentException e) {
throw new ReconfigurationException(property, newVal, getConf().get(
property), e);
} finally {
namesystem.writeUnlock("reconfigureReadStrategyParameters");
}
}

@Override // ReconfigurableBase
protected Configuration getNewConf() {
return new HdfsConfiguration();
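For context on how the new branch is exercised: an operator would update the two keys in hdfs-site.xml and then trigger the NameNode's standard runtime-reconfiguration flow (typically `hdfs dfsadmin -reconfig namenode <host:ipc_port> start`), which ultimately reaches `reconfigurePropertyImpl` above. A hedged programmatic sketch of the same flow, mirroring the test below and assuming `nameNode` is a running `NameNode` instance (for example from a MiniDFSCluster):

```java
// Hedged usage sketch; 'nameNode' is assumed to be a running NameNode.
nameNode.reconfigurePropertyImpl(DFS_NAMENODE_READ_CONSIDERLOAD_KEY, "true");
nameNode.reconfigurePropertyImpl(DFS_NAMENODE_READ_CONSIDERSTORAGETYPE_KEY, "false");

// A null value reverts the key to its compile-time default (false for both keys).
nameNode.reconfigurePropertyImpl(DFS_NAMENODE_READ_CONSIDERLOAD_KEY, null);

// Anything other than true/false is rejected with a ReconfigurationException.
```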
@@ -30,6 +30,8 @@

import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MAX_NODES_TO_REPORT_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_IMAGE_PARALLEL_LOAD_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_CONSIDERLOAD_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_CONSIDERSTORAGETYPE_KEY;
import static org.junit.Assert.*;

import org.slf4j.Logger;
@@ -654,6 +656,43 @@ public void testReconfigureDecommissionBackoffMonitorParameters()
}
}


@Test
public void testReconfigureReadStrategyParameters() throws Exception {
final NameNode nameNode = cluster.getNameNode();
final DatanodeManager datanodeManager =
nameNode.namesystem.getBlockManager().getDatanodeManager();
Configuration conf = nameNode.getConf();
// expect the default value to be false
assertFalse(conf.getBoolean(DFS_NAMENODE_READ_CONSIDERLOAD_KEY,
true));
assertFalse(conf.getBoolean(DFS_NAMENODE_READ_CONSIDERSTORAGETYPE_KEY,
true));

try {
nameNode.reconfigurePropertyImpl(DFS_NAMENODE_READ_CONSIDERLOAD_KEY,
"non-boolean");
fail("should not reach here");
} catch (ReconfigurationException e) {
assertEquals(
"Could not change property dfs.namenode.read.considerLoad "
+ "from 'false' to 'non-boolean'",
e.getMessage());
}


nameNode.reconfigurePropertyImpl(DFS_NAMENODE_READ_CONSIDERLOAD_KEY,
"true");
nameNode.reconfigurePropertyImpl(DFS_NAMENODE_READ_CONSIDERSTORAGETYPE_KEY,
"TRUE");
assertTrue(datanodeManager.isReadConsiderLoad());
assertTrue(datanodeManager.isReadConsiderStorageType());

nameNode.reconfigurePropertyImpl(DFS_NAMENODE_READ_CONSIDERSTORAGETYPE_KEY,
null);
assertFalse(datanodeManager.isReadConsiderStorageType());
}

@After
public void shutDown() throws IOException {
if (cluster != null) {
@@ -99,6 +99,8 @@
import org.slf4j.LoggerFactory;

import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_CONSIDERLOAD_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_CONSIDERSTORAGETYPE_KEY;
import static org.apache.hadoop.hdfs.client.HdfsAdmin.TRASH_PERMISSION;
import static org.hamcrest.CoreMatchers.allOf;
import static org.hamcrest.CoreMatchers.anyOf;
@@ -441,7 +443,7 @@ public void testNameNodeGetReconfigurableProperties() throws IOException, Interr
final List<String> outs = Lists.newArrayList();
final List<String> errs = Lists.newArrayList();
getReconfigurableProperties("namenode", address, outs, errs);
assertEquals(22, outs.size());
assertEquals(24, outs.size());
assertTrue(outs.get(0).contains("Reconfigurable properties:"));
assertEquals(DFS_BLOCK_INVALIDATE_LIMIT_KEY, outs.get(1));
assertEquals(DFS_BLOCK_PLACEMENT_EC_CLASSNAME_KEY, outs.get(2));
@@ -456,6 +458,8 @@ public void testNameNodeGetReconfigurableProperties() throws IOException, Interr
assertEquals(DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT, outs.get(11));
assertEquals(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, outs.get(12));
assertEquals(DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY, outs.get(13));
assertEquals(DFS_NAMENODE_READ_CONSIDERLOAD_KEY, outs.get(14));
assertEquals(DFS_NAMENODE_READ_CONSIDERSTORAGETYPE_KEY, outs.get(15));
assertEquals(errs.size(), 0);
}
