From 60e682959a092ffc2f7e99bf71cf6c29a4400273 Mon Sep 17 00:00:00 2001 From: zhanghaobo Date: Fri, 16 Jun 2023 15:28:58 +0800 Subject: [PATCH 1/5] HDFS-17050. Erasure coding: fix bug for invalidating a duplicated block when two EC blocks are on the same datanode but in different storages. --- .../hdfs/server/blockmanagement/DatanodeStorageInfo.java | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java index fc6b537ef2ea8..6619b4fa5ce65 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java @@ -262,9 +262,11 @@ public AddBlockResult addBlock(BlockInfo b, Block reportedBlock) { if (otherStorage != null) { if (otherStorage != this) { - // The block belongs to a different storage. Remove it first. - otherStorage.removeBlock(b); - result = AddBlockResult.REPLACED; + if (!b.isStriped()) { + // The block belongs to a different storage. Remove it first. + otherStorage.removeBlock(b); + result = AddBlockResult.REPLACED; + } } else { // The block is already associated with this storage. return AddBlockResult.ALREADY_EXIST; From 0104f10f9c7ef933b0f9898ffba5d08976d44d79 Mon Sep 17 00:00:00 2001 From: zhanghaobo Date: Sun, 18 Jun 2023 20:27:18 +0800 Subject: [PATCH 2/5] tmp --- .../server/blockmanagement/BlockInfo.java | 3 ++- .../blockmanagement/BlockInfoStriped.java | 24 +++++++++++++++++++ .../blockmanagement/DatanodeStorageInfo.java | 12 ++++++++++ 3 files changed, 38 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java index 659f218437539..22edb64bf9044 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java @@ -245,6 +245,7 @@ DatanodeStorageInfo findStorageInfo(DatanodeDescriptor dn) { // DatanodeStorageInfos for this block which could be local providedStorageInfo = cur; } + // When iterating to block -792 at index 2, we enter this if branch. } else if (cur.getDatanodeDescriptor() == dn) { return cur; } @@ -259,7 +260,7 @@ DatanodeStorageInfo findStorageInfo(DatanodeDescriptor dn) { */ int findStorageInfo(DatanodeStorageInfo storageInfo) { int len = getCapacity(); - for(int idx = 0; idx < len; idx++) { + for (int idx = 0; idx < len; idx++) { DatanodeStorageInfo cur = getStorageInfo(idx); if (cur == storageInfo) { return idx; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java index 4b8d092935a00..73c2edc9ddb36 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java @@ -123,12 +123,18 @@ boolean addStorage(DatanodeStorageInfo storage, Block
reportedBlock) { reportedBlock.getBlockId()) == this.getBlockId(), "reported blk_%s does not belong to the group of stored blk_%s", reportedBlock.getBlockId(), this.getBlockId()); + // blockIndex==1 int blockIndex = BlockIdManager.getBlockIndex(reportedBlock); + // index==1 int index = blockIndex; + // May get /data10 of dn-01 here. DatanodeStorageInfo old = getStorageInfo(index); + // storage is /data12 of dn-0281. + // storage is the DSI holding the reported block id -793. if (old != null && !old.equals(storage)) { // over replicated // check if the storage has been stored int i = findStorageInfo(storage); + //int i = findStorageInfo(storage, reportedBlock); if (i == -1) { index = findSlot(); } else { return true; } @@ -158,6 +164,24 @@ private int findStorageInfoFromEnd(DatanodeStorageInfo storage) { return -1; } + /** + * Find specified DatanodeStorageInfo. + * @return index or -1 if not found. + */ + int findStorageInfo(DatanodeStorageInfo storageInfo, Block block) { +// Block blockOnStorage = getBlockOnStorage(storageInfo); +// long blockOnStorageId = blockOnStorage.getBlockId(); + int len = getCapacity(); + for (int idx = 0; idx < len; idx++) { + DatanodeStorageInfo cur = getStorageInfo(idx); +// if (cur == storageInfo && blockOnStorageId == block.getBlockId()) { + if (cur == storageInfo) { + return idx; + } + } + return -1; + } + @VisibleForTesting public byte getStorageBlockIndex(DatanodeStorageInfo storage) { int i = this.findStorageInfo(storage); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java index 6619b4fa5ce65..a0a5fd32ed330 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java @@ -257,15 +257,26 @@ public AddBlockResult addBlock(BlockInfo b, Block reportedBlock) { // First check whether the block belongs to a different storage // on the same DN. AddBlockResult result = AddBlockResult.ADDED; + + // Returns the DSI at index 2. DatanodeStorageInfo otherStorage = b.findStorageInfo(getDatanodeDescriptor()); if (otherStorage != null) { + // this is the DSI at index 1 (block id -793). if (otherStorage != this) { if (!b.isStriped()) { // The block belongs to a different storage. Remove it first. otherStorage.removeBlock(b); result = AddBlockResult.REPLACED; + } else { + long reportBlockId = reportedBlock.getBlockId(); + Block blockOnStorage = ((BlockInfoStriped) b).getBlockOnStorage(otherStorage); + if (reportBlockId == blockOnStorage.getBlockId()) { + // The block belongs to a different storage. Remove it first. + otherStorage.removeBlock(b); + result = AddBlockResult.REPLACED; + } } } else { // The block is already associated with this storage.
@@ -273,6 +284,7 @@ public AddBlockResult addBlock(BlockInfo b, Block reportedBlock) { } } + // blk_-9223372036578646783 is not added to position 3*i of the triplets array here. // add to the head of the data-node list b.addStorage(this, reportedBlock); insertToList(b); From 26cad48d28c0bdd125618227104dfe26b365d454 Mon Sep 17 00:00:00 2001 From: zhanghaobo Date: Mon, 19 Jun 2023 14:17:11 +0800 Subject: [PATCH 3/5] tmp save --- .../apache/hadoop/util/JvmPauseMonitor.java | 4 +- .../server/blockmanagement/BlockInfo.java | 1 - .../blockmanagement/BlockInfoStriped.java | 49 ++++++++----------- .../blockmanagement/DatanodeStorageInfo.java | 4 -- .../server/namenode/TestAddStripedBlocks.java | 4 ++ 5 files changed, 27 insertions(+), 35 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java index 382266a99401f..371c8dda83540 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java @@ -195,11 +195,11 @@ public void run() { if (extraSleepTime > warnThresholdMs) { ++numGcWarnThresholdExceeded; - LOG.warn(formatMessage( + LOG.debug(formatMessage( extraSleepTime, gcTimesAfterSleep, gcTimesBeforeSleep)); } else if (extraSleepTime > infoThresholdMs) { ++numGcInfoThresholdExceeded; - LOG.info(formatMessage( + LOG.debug(formatMessage( extraSleepTime, gcTimesAfterSleep, gcTimesBeforeSleep)); } totalGcExtraSleepTime += extraSleepTime; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java index 22edb64bf9044..8530bd0828d55 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java @@ -245,7 +245,6 @@ DatanodeStorageInfo findStorageInfo(DatanodeDescriptor dn) { // DatanodeStorageInfos for this block which could be local providedStorageInfo = cur; } - // When iterating to block -792 at index 2, we enter this if branch. } else if (cur.getDatanodeDescriptor() == dn) { return cur; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java index 73c2edc9ddb36..5bb4f7c7b973b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java @@ -6,9 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * +

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

* Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -123,19 +123,30 @@ boolean addStorage(DatanodeStorageInfo storage, Block reportedBlock) { reportedBlock.getBlockId()) == this.getBlockId(), "reported blk_%s does not belong to the group of stored blk_%s", reportedBlock.getBlockId(), this.getBlockId()); - // blockIndex==1 int blockIndex = BlockIdManager.getBlockIndex(reportedBlock); - // index==1 int index = blockIndex; - // May get /data10 of dn-01 here. DatanodeStorageInfo old = getStorageInfo(index); - // storage is /data12 of dn-0281. - // storage is the DSI holding the reported block id -793. if (old != null && !old.equals(storage)) { // over replicated // check if the storage has been stored + boolean blockIdNotEquals = false; + long blockGroupId = BlockIdManager.convertToStripedID(reportedBlock.getBlockId() - blockIndex); + Iterator<BlockInfo> blockIterator = old.getBlockIterator(); + while (blockIterator.hasNext()) { + BlockInfo blockInfo = blockIterator.next(); + if (!blockInfo.isStriped()) { + continue; + } else { + if (BlockIdManager.convertToStripedID(blockInfo.getBlockId()) == blockGroupId) { + if (blockInfo.getBlockId() != reportedBlock.getBlockId()) { + blockIdNotEquals = true; + break; + } + } + } + } + // Add an extra condition here: the block id on the old storage must equal the reported id; otherwise set i = -1. int i = findStorageInfo(storage); - //int i = findStorageInfo(storage, reportedBlock); - if (i == -1) { + if (i == -1 || blockIdNotEquals) { index = findSlot(); } else { return true; } @@ -164,24 +175,6 @@ private int findStorageInfoFromEnd(DatanodeStorageInfo storage) { return -1; } - /** - * Find specified DatanodeStorageInfo. - * @return index or -1 if not found. - */ - int findStorageInfo(DatanodeStorageInfo storageInfo, Block block) { -// Block blockOnStorage = getBlockOnStorage(storageInfo); -// long blockOnStorageId = blockOnStorage.getBlockId(); - int len = getCapacity(); - for (int idx = 0; idx < len; idx++) { - DatanodeStorageInfo cur = getStorageInfo(idx); -// if (cur == storageInfo && blockOnStorageId == block.getBlockId()) { - if (cur == storageInfo) { - return idx; - } - } - return -1; - } - @VisibleForTesting public byte getStorageBlockIndex(DatanodeStorageInfo storage) { int i = this.findStorageInfo(storage); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java index a0a5fd32ed330..070946105e633 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java @@ -257,13 +257,10 @@ public AddBlockResult addBlock(BlockInfo b, Block reportedBlock) { // First check whether the block belongs to a different storage // on the same DN. AddBlockResult result = AddBlockResult.ADDED; - - // Returns the DSI at index 2. DatanodeStorageInfo otherStorage = b.findStorageInfo(getDatanodeDescriptor()); if (otherStorage != null) { - // this is the DSI at index 1 (block id -793). if (otherStorage != this) { if (!b.isStriped()) { // The block belongs to a different storage. Remove it first.
@@ -284,7 +281,6 @@ public AddBlockResult addBlock(BlockInfo b, Block reportedBlock) { } } - // blk_-9223372036578646783 is not added to position 3*i of the triplets array here. // add to the head of the data-node list b.addStorage(this, reportedBlock); insertToList(b); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java index a3c2ebbdd3730..b4ac775a72b97 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock; +import org.apache.hadoop.hdfs.protocol.StripedBlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; @@ -381,6 +382,9 @@ public void testCheckStripedReplicaCorrupt() throws Exception { BlockInfo stored = fileNode.getBlocks()[0]; BlockManagerTestUtil.updateState(ns.getBlockManager()); assertEquals(0, ns.getCorruptReplicaBlocks()); +// for (BlockInfo b : fileNode.getBlocks()) { +// System.out.println("zhb###" + ((BlockInfoStriped) b).getRealDataBlockNum() ); +// } // Now send a block report with correct size DatanodeStorage storage = new DatanodeStorage(UUID.randomUUID().toString()); From 3a509bd1ac314dc8cc7eb40b53051eb60f92bb42 Mon Sep 17 00:00:00 2001 From: zhanghaobo Date: Mon, 19 Jun 2023 23:46:10 +0800 Subject: [PATCH 4/5] checkRep --- .../blockmanagement/BlockInfoStriped.java | 5 ++-- .../server/blockmanagement/BlockManager.java | 23 +++++++++++++------ .../blockmanagement/DatanodeStorageInfo.java | 4 ++-- .../server/namenode/TestAddStripedBlocks.java | 4 ---- 4 files changed, 21 insertions(+), 15 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java index 5bb4f7c7b973b..e4af2b8cbedbb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdfs.server.blockmanagement; import org.apache.hadoop.classification.VisibleForTesting; +import org.apache.hadoop.hdfs.protocol.StripedBlockInfo; import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.protocol.Block; @@ -137,14 +138,14 @@ boolean addStorage(DatanodeStorageInfo storage, Block reportedBlock) { continue; } else { if (BlockIdManager.convertToStripedID(blockInfo.getBlockId()) == blockGroupId) { - if (blockInfo.getBlockId() != reportedBlock.getBlockId()) { + Block blockOnOldStorage = ((BlockInfoStriped) blockInfo).getBlockOnStorage(old); + if (blockOnOldStorage.getBlockId() != reportedBlock.getBlockId()) { blockIdNotEquals = true; break; } } } } - // Add an extra condition here: the block id on the old storage must equal the reported id; otherwise set i = -1.
int i = findStorageInfo(storage); if (i == -1 || blockIdNotEquals) { index = findSlot(); } else { return true; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index 9133515afbc02..2b3410d656d8c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -2550,16 +2550,18 @@ DatanodeDescriptor[] chooseSourceDatanodes(BlockInfo block, BitSet liveBitSet = null; BitSet decommissioningBitSet = null; + HashSet<DatanodeDescriptor> alreadyCorruptedSet = null; if (isStriped) { int blockNum = ((BlockInfoStriped) block).getTotalBlockNum(); liveBitSet = new BitSet(blockNum); decommissioningBitSet = new BitSet(blockNum); + alreadyCorruptedSet = new HashSet<>(); } for (DatanodeStorageInfo storage : blocksMap.getStorages(block)) { final DatanodeDescriptor node = getDatanodeDescriptorFromStorage(storage); final StoredReplicaState state = checkReplicaOnStorage(numReplicas, block, - storage, corruptReplicas.getNodes(block), false); + storage, corruptReplicas.getNodes(block), false, alreadyCorruptedSet); if (state == StoredReplicaState.LIVE) { if (storage.getStorageType() == StorageType.PROVIDED) { storage = new DatanodeStorageInfo(node, storage.getStorageID(), @@ -4543,13 +4545,15 @@ public NumberReplicas countNodes(BlockInfo b) { NumberReplicas countNodes(BlockInfo b, boolean inStartupSafeMode) { NumberReplicas numberReplicas = new NumberReplicas(); Collection<DatanodeDescriptor> nodesCorrupt = corruptReplicas.getNodes(b); + HashSet<DatanodeDescriptor> alreadyCorruptSet = null; if (b.isStriped()) { + alreadyCorruptSet = new HashSet<>(); countReplicasForStripedBlock(numberReplicas, (BlockInfoStriped) b, - nodesCorrupt, inStartupSafeMode); + nodesCorrupt, inStartupSafeMode, alreadyCorruptSet); } else { for (DatanodeStorageInfo storage : blocksMap.getStorages(b)) { checkReplicaOnStorage(numberReplicas, b, storage, nodesCorrupt, - inStartupSafeMode); + inStartupSafeMode, alreadyCorruptSet); } } return numberReplicas; } @@ -4557,11 +4561,16 @@ NumberReplicas countNodes(BlockInfo b, boolean inStartupSafeMode) { private StoredReplicaState checkReplicaOnStorage(NumberReplicas counters, BlockInfo b, DatanodeStorageInfo storage, - Collection<DatanodeDescriptor> nodesCorrupt, boolean inStartupSafeMode) { + Collection<DatanodeDescriptor> nodesCorrupt, boolean inStartupSafeMode, + HashSet<DatanodeDescriptor> alreadyCorrupt) { final StoredReplicaState s; if (storage.getState() == State.NORMAL) { final DatanodeDescriptor node = storage.getDatanodeDescriptor(); - if (nodesCorrupt != null && nodesCorrupt.contains(node)) { + if (nodesCorrupt != null && nodesCorrupt.contains(node) && + (alreadyCorrupt == null || !alreadyCorrupt.contains(node))) { + if (alreadyCorrupt != null) { + alreadyCorrupt.add(node); + } s = StoredReplicaState.CORRUPT; } else if (inStartupSafeMode) { s = StoredReplicaState.LIVE; @@ -4607,12 +4616,12 @@ private StoredReplicaState checkReplicaOnStorage(NumberReplicas counters, */ private void countReplicasForStripedBlock(NumberReplicas counters, BlockInfoStriped block, Collection<DatanodeDescriptor> nodesCorrupt, - boolean inStartupSafeMode) { + boolean inStartupSafeMode, HashSet<DatanodeDescriptor> alreadyCorrupt) { BitSet liveBitSet = new BitSet(block.getTotalBlockNum()); BitSet decommissioningBitSet = new BitSet(block.getTotalBlockNum()); for (StorageAndBlockIndex si : block.getStorageAndIndexInfos()) {
StoredReplicaState state = checkReplicaOnStorage(counters, block, - si.getStorage(), nodesCorrupt, inStartupSafeMode); + si.getStorage(), nodesCorrupt, inStartupSafeMode, alreadyCorrupt); countLiveAndDecommissioningReplicas(counters, state, liveBitSet, decommissioningBitSet, si.getBlockIndex()); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java index 070946105e633..ba20460496342 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java @@ -268,8 +268,8 @@ public AddBlockResult addBlock(BlockInfo b, Block reportedBlock) { result = AddBlockResult.REPLACED; } else { long reportBlockId = reportedBlock.getBlockId(); - Block blockOnStorage = ((BlockInfoStriped) b).getBlockOnStorage(otherStorage); - if (reportBlockId == blockOnStorage.getBlockId()) { + Block blockOnOtherStorage = ((BlockInfoStriped) b).getBlockOnStorage(otherStorage); + if (reportBlockId == blockOnOtherStorage.getBlockId()) { // The block belongs to a different storage. Remove it first. otherStorage.removeBlock(b); result = AddBlockResult.REPLACED; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java index b4ac775a72b97..a3c2ebbdd3730 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java @@ -35,7 +35,6 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock; -import org.apache.hadoop.hdfs.protocol.StripedBlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; @@ -382,9 +381,6 @@ public void testCheckStripedReplicaCorrupt() throws Exception { BlockInfo stored = fileNode.getBlocks()[0]; BlockManagerTestUtil.updateState(ns.getBlockManager()); assertEquals(0, ns.getCorruptReplicaBlocks()); -// for (BlockInfo b : fileNode.getBlocks()) { -// System.out.println("zhb###" + ((BlockInfoStriped) b).getRealDataBlockNum() ); -// } // Now send a block report with correct size DatanodeStorage storage = new DatanodeStorage(UUID.randomUUID().toString()); From 692843f3757e636ab65c77d522ac77f53f439ed3 Mon Sep 17 00:00:00 2001 From: zhanghaobo Date: Tue, 20 Jun 2023 09:43:05 +0800 Subject: [PATCH 5/5] remove unused import --- .../hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java | 1 - 1 file changed, 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java index e4af2b8cbedbb..122da667c165e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hdfs.server.blockmanagement; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.hdfs.protocol.StripedBlockInfo; import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.protocol.Block;
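
For reference, a minimal standalone sketch (not part of the patch series) of the striped block-id arithmetic that the blockGroupId/blockIndex checks in PATCH 2-4 rely on. The mask value 15 follows my reading of HdfsServerConstants.BLOCK_GROUP_INDEX_MASK, and the class and helper names below are illustrative assumptions rather than Hadoop APIs: two internal blocks of the same block group (for example indices 1 and 2) can be reported by the same datanode from different storages, so matching on the DatanodeDescriptor alone conflates them, while comparing the reported block ids distinguishes them.

public class StripedBlockIdSketch {
  // Assumed to mirror HdfsServerConstants.BLOCK_GROUP_INDEX_MASK.
  private static final long BLOCK_GROUP_INDEX_MASK = 15;

  // Index of an internal block within its group, like BlockIdManager#getBlockIndex.
  static int getBlockIndex(long blockId) {
    return (int) (blockId & BLOCK_GROUP_INDEX_MASK);
  }

  // Block group id shared by all internal blocks, like BlockIdManager#convertToStripedID.
  static long convertToStripedID(long blockId) {
    return blockId & ~BLOCK_GROUP_INDEX_MASK;
  }

  public static void main(String[] args) {
    long groupId = -9223372036578646784L; // striped block group id, index 0
    long internal1 = groupId + 1;         // e.g. blk_-9223372036578646783, index 1
    long internal2 = groupId + 2;         // e.g. blk_-9223372036578646782, index 2

    // Both internal blocks map back to the same block group id...
    System.out.println(convertToStripedID(internal1) == convertToStripedID(internal2)); // true
    // ...yet they are distinct replicas with distinct indices, which is why addBlock/addStorage
    // must also compare the reported block id before removing the replica held by the other
    // storage on the same datanode.
    System.out.println(getBlockIndex(internal1) + " " + getBlockIndex(internal2)); // 1 2
  }
}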