Skip to content

Commit 0d1f846

Browse files
Author: zhuxiangyi (committed)
add pendingDeleteBlocksCount metrics and remove removeBlocks func
1 parent de7524b commit 0d1f846

File tree

3 files changed: +27 additions, −33 deletions

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

Lines changed: 10 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -4940,13 +4940,15 @@ public long getLastRedundancyMonitorTS() {
49404940
private class MarkedDeleteBlockScrubber implements Runnable {
49414941
private Iterator<BlockInfo> toDeleteIterator = null;
49424942
private boolean isSleep;
4943+
private NameNodeMetrics metrics;
49434944

49444945
private void remove(long time) {
49454946
if (checkToDeleteIterator()) {
49464947
namesystem.writeLock();
49474948
try {
49484949
while (toDeleteIterator.hasNext()) {
49494950
removeBlock(toDeleteIterator.next());
4951+
metrics.decrPendingDeleteBlocksCount();
49504952
if (Time.monotonicNow() - time > deleteBlockLockTimeMs) {
49514953
isSleep = true;
49524954
break;
@@ -4969,7 +4971,7 @@ public void run() {
49694971
!Thread.currentThread().isInterrupted()) {
49704972
if (!markedDeleteQueue.isEmpty() || checkToDeleteIterator()) {
49714973
try {
4972-
NameNodeMetrics metrics = NameNode.getNameNodeMetrics();
4974+
metrics = NameNode.getNameNodeMetrics();
49734975
metrics.setDeleteBlocksQueued(markedDeleteQueue.size());
49744976
isSleep = false;
49754977
long startTime = Time.monotonicNow();
@@ -5351,10 +5353,17 @@ public BlockIdManager getBlockIdManager() {
53515353
return blockIdManager;
53525354
}
53535355

5356+
@VisibleForTesting
53545357
public ConcurrentLinkedQueue<List<BlockInfo>> getMarkedDeleteQueue() {
53555358
return markedDeleteQueue;
53565359
}
53575360

5361+
public void addBLocksToMarkedDeleteQueue(List<BlockInfo> blockInfos) {
5362+
markedDeleteQueue.add(blockInfos);
5363+
NameNode.getNameNodeMetrics().
5364+
incrPendingDeleteBlocksCount(blockInfos.size());
5365+
}
5366+
53585367
public long nextGenerationStamp(boolean legacyBlock) throws IOException {
53595368
return blockIdManager.nextGenerationStamp(legacyBlock);
53605369
}

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

Lines changed: 7 additions & 32 deletions
Original file line number | Diff line number | Diff line change
@@ -98,7 +98,6 @@
9898
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_DIFF_LISTING_LIMIT_DEFAULT;
9999
import static org.apache.hadoop.hdfs.DFSUtil.isParentEntry;
100100

101-
import java.util.concurrent.ConcurrentLinkedQueue;
102101
import java.util.concurrent.atomic.AtomicLong;
103102

104103
import org.apache.commons.text.CaseUtils;
@@ -2374,7 +2373,7 @@ boolean truncate(String src, long newLength, String clientName,
23742373
}
23752374
getEditLog().logSync();
23762375
if (!toRemoveBlocks.getToDeleteList().isEmpty()) {
2377-
blockManager.getMarkedDeleteQueue().add(
2376+
blockManager.addBLocksToMarkedDeleteQueue(
23782377
toRemoveBlocks.getToDeleteList());
23792378
}
23802379
logAuditEvent(true, operationName, src, null, status);
@@ -2822,7 +2821,7 @@ private HdfsFileStatus startFileInt(String src,
28222821
if (!skipSync) {
28232822
getEditLog().logSync();
28242823
if (toRemoveBlocks != null) {
2825-
blockManager.getMarkedDeleteQueue().add(
2824+
blockManager.addBLocksToMarkedDeleteQueue(
28262825
toRemoveBlocks.getToDeleteList());
28272826
}
28282827
}
@@ -3346,7 +3345,7 @@ void renameTo(final String src, final String dst,
33463345
assert res != null;
33473346
BlocksMapUpdateInfo collectedBlocks = res.collectedBlocks;
33483347
if (!collectedBlocks.getToDeleteList().isEmpty()) {
3349-
blockManager.getMarkedDeleteQueue().add(
3348+
blockManager.addBLocksToMarkedDeleteQueue(
33503349
collectedBlocks.getToDeleteList());
33513350
}
33523351

@@ -3386,7 +3385,7 @@ boolean delete(String src, boolean recursive, boolean logRetryCache)
33863385
getEditLog().logSync();
33873386
logAuditEvent(ret, operationName, src);
33883387
if (toRemovedBlocks != null) {
3389-
blockManager.getMarkedDeleteQueue().add(
3388+
blockManager.addBLocksToMarkedDeleteQueue(
33903389
toRemovedBlocks.getToDeleteList());
33913390
}
33923391
return ret;
@@ -3397,30 +3396,6 @@ FSPermissionChecker getPermissionChecker()
33973396
return dir.getPermissionChecker();
33983397
}
33993398

3400-
/**
3401-
* From the given list, incrementally remove the blocks from blockManager
3402-
* Writelock is dropped and reacquired every blockDeletionIncrement to
3403-
* ensure that other waiters on the lock can get in. See HDFS-2938
3404-
*
3405-
* @param blocks
3406-
* An instance of {@link BlocksMapUpdateInfo} which contains a list
3407-
* of blocks that need to be removed from blocksMap
3408-
*/
3409-
void removeBlocks(BlocksMapUpdateInfo blocks) {
3410-
List<BlockInfo> toDeleteList = blocks.getToDeleteList();
3411-
Iterator<BlockInfo> iter = toDeleteList.iterator();
3412-
while (iter.hasNext()) {
3413-
writeLock();
3414-
try {
3415-
for (int i = 0; i < blockDeletionIncrement && iter.hasNext(); i++) {
3416-
blockManager.removeBlock(iter.next());
3417-
}
3418-
} finally {
3419-
writeUnlock("removeBlocks");
3420-
}
3421-
}
3422-
}
3423-
34243399
/**
34253400
* Remove leases and inodes related to a given path
34263401
* @param removedUCFiles INodes whose leases need to be released
@@ -4629,7 +4604,7 @@ private void clearCorruptLazyPersistFiles()
46294604
INodesInPath.fromINode((INodeFile) bc), false);
46304605
changed |= toRemoveBlocks != null;
46314606
if (toRemoveBlocks != null) {
4632-
blockManager.getMarkedDeleteQueue().add(
4607+
blockManager.addBLocksToMarkedDeleteQueue(
46334608
toRemoveBlocks.getToDeleteList());
46344609
}
46354610
}
@@ -7341,7 +7316,7 @@ void deleteSnapshot(String snapshotRoot, String snapshotName,
73417316
// Breaking the pattern as removing blocks have to happen outside of the
73427317
// global lock
73437318
if (blocksToBeDeleted != null) {
7344-
blockManager.getMarkedDeleteQueue().add(
7319+
blockManager.addBLocksToMarkedDeleteQueue(
73457320
blocksToBeDeleted.getToDeleteList());
73467321
}
73477322
logAuditEvent(true, operationName, rootPath, null, null);
@@ -7368,7 +7343,7 @@ public void gcDeletedSnapshot(String snapshotRoot, String snapshotName)
73687343
} finally {
73697344
writeUnlock(operationName, getLockReportInfoSupplier(rootPath));
73707345
}
7371-
blockManager.getMarkedDeleteQueue().add(
7346+
blockManager.addBLocksToMarkedDeleteQueue(
73727347
blocksToBeDeleted.getToDeleteList());
73737348
}
73747349

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java

Lines changed: 10 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -91,6 +91,8 @@ public class NameNodeMetrics {
9191
MutableGaugeInt pendingEditsCount;
9292
@Metric("Number of delete blocks Queued")
9393
MutableGaugeInt deleteBlocksQueued;
94+
@Metric("Number of pending deletion blocks")
95+
MutableGaugeInt pendingDeleteBlocksCount;
9496

9597
@Metric("Number of file system operations")
9698
public long totalFileOps(){
@@ -347,6 +349,14 @@ public void setDeleteBlocksQueued(int size) {
347349
deleteBlocksQueued.set(size);
348350
}
349351

352+
public void incrPendingDeleteBlocksCount(int size) {
353+
pendingDeleteBlocksCount.incr(size);
354+
}
355+
356+
public void decrPendingDeleteBlocksCount() {
357+
pendingDeleteBlocksCount.decr();
358+
}
359+
350360
public void addBlockOpsBatched(int count) {
351361
blockOpsBatched.incr(count);
352362
}

Commit comments (0)