Skip to content

Commit 7d89204

Browse files
committed
change config name and simplify the code
1 parent 9fbdca6 commit 7d89204

File tree

5 files changed

+27
-44
lines changed

5 files changed

+27
-44
lines changed

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -698,9 +698,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
698698
"dfs.datanode.max.disks.to.report";
699699
public static final int DFS_DATANODE_MAX_DISKS_TO_REPORT_DEFAULT =
700700
5;
701-
public static final String DFS_DATANODE_MAX_SLOWDISKS_TO_BE_EXCLUDED_KEY =
702-
"dfs.datanode.max.slowdisks.to.be.excluded";
703-
public static final int DFS_DATANODE_MAX_SLOWDISKS_TO_BE_EXCLUDED_DEFAULT =
701+
public static final String DFS_DATANODE_MAX_SLOWDISKS_TO_EXCLUDE_KEY =
702+
"dfs.datanode.max.slowdisks.to.exclude";
703+
public static final int DFS_DATANODE_MAX_SLOWDISKS_TO_EXCLUDE_DEFAULT =
704704
0;
705705
public static final String DFS_DATANODE_HOST_NAME_KEY =
706706
HdfsClientConfigKeys.DeprecatedKeys.DFS_DATANODE_HOST_NAME_KEY;

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -107,9 +107,9 @@ private FsVolumeReference chooseVolume(List<FsVolumeImpl> list,
107107

108108
// Exclude slow disks when choosing volume.
109109
if (diskMetrics != null) {
110-
List<String> slowDisksToBeExcluded = diskMetrics.getSlowDisksToBeExcluded();
110+
List<String> slowDisksToExclude = diskMetrics.getSlowDisksToExclude();
111111
list = list.stream()
112-
.filter(volume -> !slowDisksToBeExcluded.contains(volume.getBaseURI().getPath()))
112+
.filter(volume -> !slowDisksToExclude.contains(volume.getBaseURI().getPath()))
113113
.collect(Collectors.toList());
114114
}
115115

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeDiskMetrics.java

Lines changed: 16 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -36,6 +36,7 @@
3636

3737
import java.io.IOException;
3838
import java.util.ArrayList;
39+
import java.util.Collections;
3940
import java.util.HashMap;
4041
import java.util.Iterator;
4142
import java.util.List;
@@ -77,11 +78,11 @@ public class DataNodeDiskMetrics {
7778
/**
7879
* The number of slow disks that need to be excluded.
7980
*/
80-
private int maxSlowDisksToBeExcluded;
81+
private int maxSlowDisksToExclude;
8182
/**
8283
* List of slow disks that need to be excluded.
8384
*/
84-
private List<String> slowDisksToBeExcluded = new ArrayList<>();
85+
private List<String> slowDisksToExclude = new ArrayList<>();
8586

8687
public DataNodeDiskMetrics(DataNode dn, long diskOutlierDetectionIntervalMs,
8788
Configuration conf) {
@@ -93,9 +94,9 @@ public DataNodeDiskMetrics(DataNode dn, long diskOutlierDetectionIntervalMs,
9394
lowThresholdMs =
9495
conf.getLong(DFSConfigKeys.DFS_DATANODE_SLOWDISK_LOW_THRESHOLD_MS_KEY,
9596
DFSConfigKeys.DFS_DATANODE_SLOWDISK_LOW_THRESHOLD_MS_DEFAULT);
96-
maxSlowDisksToBeExcluded =
97-
conf.getInt(DFSConfigKeys.DFS_DATANODE_MAX_SLOWDISKS_TO_BE_EXCLUDED_KEY,
98-
DFSConfigKeys.DFS_DATANODE_MAX_SLOWDISKS_TO_BE_EXCLUDED_DEFAULT);
97+
maxSlowDisksToExclude =
98+
conf.getInt(DFSConfigKeys.DFS_DATANODE_MAX_SLOWDISKS_TO_EXCLUDE_KEY,
99+
DFSConfigKeys.DFS_DATANODE_MAX_SLOWDISKS_TO_EXCLUDE_DEFAULT);
99100
slowDiskDetector =
100101
new OutlierDetector(minOutlierDetectionDisks, lowThresholdMs);
101102
shouldRun = true;
@@ -144,14 +145,19 @@ public void run() {
144145
detectAndUpdateDiskOutliers(metadataOpStats, readIoStats,
145146
writeIoStats);
146147

147-
// Sort the slow disks by latency.
148-
if (maxSlowDisksToBeExcluded > 0) {
148+
// Sort the slow disks by latency (descending) and exclude the slowest ones.
149+
if (maxSlowDisksToExclude > 0) {
149150
ArrayList<DiskLatency> diskLatencies = new ArrayList<>();
150151
for (Map.Entry<String, Map<DiskOp, Double>> diskStats :
151152
diskOutliersStats.entrySet()) {
152153
diskLatencies.add(new DiskLatency(diskStats.getKey(), diskStats.getValue()));
153154
}
154-
sortSlowDisks(diskLatencies);
155+
156+
Collections.sort(diskLatencies, (o1, o2)
157+
-> Double.compare(o2.getMaxLatency(), o1.getMaxLatency()));
158+
159+
slowDisksToExclude = diskLatencies.stream().limit(maxSlowDisksToExclude)
160+
.map(DiskLatency::getSlowDisk).collect(Collectors.toList());
155161
}
156162
}
157163

@@ -197,29 +203,6 @@ private void detectAndUpdateDiskOutliers(Map<String, Double> metadataOpStats,
197203
}
198204
}
199205

200-
private void sortSlowDisks(ArrayList<DiskLatency> diskLatencies) {
201-
if (diskOutliersStats.isEmpty()) {
202-
return;
203-
}
204-
205-
final PriorityQueue<DiskLatency> topNReports = new PriorityQueue<>(
206-
diskLatencies.size(),
207-
(o1, o2) -> Doubles.compare(
208-
o1.getMaxLatency(), o2.getMaxLatency()));
209-
210-
for (DiskLatency diskLatency : diskLatencies) {
211-
if (topNReports.size() < maxSlowDisksToBeExcluded) {
212-
topNReports.add(diskLatency);
213-
} else if (topNReports.peek().getMaxLatency() <
214-
diskLatency.getMaxLatency()) {
215-
topNReports.poll();
216-
topNReports.add(diskLatency);
217-
}
218-
}
219-
slowDisksToBeExcluded =
220-
topNReports.stream().map(DiskLatency::getSlowDisk).collect(Collectors.toList());
221-
}
222-
223206
/**
224207
* This structure is a wrapper over disk latencies.
225208
*/
@@ -285,7 +268,7 @@ public void addSlowDiskForTesting(String slowDiskPath,
285268
}
286269
}
287270

288-
public List<String> getSlowDisksToBeExcluded() {
289-
return slowDisksToBeExcluded;
271+
public List<String> getSlowDisksToExclude() {
272+
return slowDisksToExclude;
290273
}
291274
}

hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2484,7 +2484,7 @@
24842484
</property>
24852485

24862486
<property>
2487-
<name>dfs.datanode.max.slowdisks.to.be.excluded</name>
2487+
<name>dfs.datanode.max.slowdisks.to.exclude</name>
24882488
<value>0</value>
24892489
<description>
24902490
The number of slow disks that need to be excluded. By default, this parameter is set to 0,

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -80,7 +80,7 @@ public class TestFsVolumeList {
8080
private String baseDir;
8181
private BlockScanner blockScanner;
8282
private final static int NUM_DATANODES = 3;
83-
private final static int STORAGES_PER_DATANODE = 5;
83+
private final static int STORAGES_PER_DATANODE = 3;
8484
private final static int DEFAULT_BLOCK_SIZE = 102400;
8585
private final static int BUFFER_LENGTH = 1024;
8686

@@ -640,7 +640,7 @@ public void testExcludeSlowDiskWhenChoosingVolume() throws Exception {
640640
// Enable datanode disk metrics collector.
641641
conf.setInt(DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY, 30);
642642
// Enable excluding slow disks when choosing volume.
643-
conf.setInt(DFSConfigKeys.DFS_DATANODE_MAX_SLOWDISKS_TO_BE_EXCLUDED_KEY, 1);
643+
conf.setInt(DFSConfigKeys.DFS_DATANODE_MAX_SLOWDISKS_TO_EXCLUDE_KEY, 1);
644644
// Ensure that each volume capacity is larger than the DEFAULT_BLOCK_SIZE.
645645
long capacity = 10 * DEFAULT_BLOCK_SIZE;
646646
long[][] capacities = new long[NUM_DATANODES][STORAGES_PER_DATANODE];
@@ -704,9 +704,9 @@ public void testExcludeSlowDiskWhenChoosingVolume() throws Exception {
704704
// Wait until the data on the slow disk is collected successfully.
705705
GenericTestUtils.waitFor(new Supplier<Boolean>() {
706706
@Override public Boolean get() {
707-
return dn0.getDiskMetrics().getSlowDisksToBeExcluded().size() == 1 &&
708-
dn1.getDiskMetrics().getSlowDisksToBeExcluded().size() == 1 &&
709-
dn2.getDiskMetrics().getSlowDisksToBeExcluded().size() == 1;
707+
return dn0.getDiskMetrics().getSlowDisksToExclude().size() == 1 &&
708+
dn1.getDiskMetrics().getSlowDisksToExclude().size() == 1 &&
709+
dn2.getDiskMetrics().getSlowDisksToExclude().size() == 1;
710710
}
711711
}, 1000, 5000);
712712

0 commit comments

Comments
 (0)