@@ -18,7 +18,9 @@
 package org.apache.hadoop.hdfs.server.namenode;

 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -52,6 +54,8 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

+import java.io.IOException;
+import java.util.Arrays;
 import java.util.BitSet;
 import java.util.Iterator;
 import java.util.List;
@@ -515,4 +519,63 @@ public void testReconstrutionWithBusyBlock1() throws Exception {
     assertEquals(9, bm.countNodes(blockInfo).liveReplicas());
   }
 
+  @Test
+  public void testReconstructionWithStorageTypeNotEnough() throws Exception {
+    final HdfsConfiguration conf = new HdfsConfiguration();
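+    // Run the redundancy monitor every second so the lost block is
+    // re-scheduled for reconstruction quickly.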
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
+
+    // Nine DISK DataNodes and eleven ARCHIVE DataNodes.
+    int numDn = groupSize * 2 + 2;
+    StorageType[][] storageTypes = new StorageType[numDn][];
+    Arrays.fill(storageTypes, 0, groupSize,
+        new StorageType[]{StorageType.DISK, StorageType.DISK});
+    Arrays.fill(storageTypes, groupSize, numDn,
+        new StorageType[]{StorageType.ARCHIVE, StorageType.ARCHIVE});
+
+    // Nine distinct DISK racks and one shared ARCHIVE rack (/rack0).
+    String[] racks = {
+        "/rack1", "/rack2", "/rack3", "/rack4", "/rack5", "/rack6", "/rack7", "/rack8",
+        "/rack9", "/rack0", "/rack0", "/rack0", "/rack0", "/rack0", "/rack0", "/rack0",
+        "/rack0", "/rack0", "/rack0", "/rack0"};
+
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDn)
+        .storageTypes(storageTypes)
+        .racks(racks)
+        .build();
+    cluster.waitActive();
+    DistributedFileSystem fs = cluster.getFileSystem();
+    fs.enableErasureCodingPolicy(
+        StripedFileTestUtil.getDefaultECPolicy().getName());
+
+    try {
+      fs.mkdirs(dirPath);
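+      // The COLD storage policy stores all blocks on ARCHIVE storage.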
+      fs.setStoragePolicy(dirPath, "COLD");
+      fs.setErasureCodingPolicy(dirPath,
+          StripedFileTestUtil.getDefaultECPolicy().getName());
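+      // Write two full stripes so every internal block of the group,
+      // data and parity, is created.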
+      DFSTestUtil.createFile(fs, filePath,
+          cellSize * dataBlocks * 2, (short) 1, 0L);
+
+      // Stop a DataNode holding one internal block of the last block group.
+      LocatedBlocks blks = fs.getClient().getLocatedBlocks(filePath.toString(), 0);
+      LocatedStripedBlock block = (LocatedStripedBlock) blks.getLastLocatedBlock();
+      DatanodeInfo dnToStop = block.getLocations()[0];
+      cluster.stopDataNode(dnToStop.getXferAddr());
+      cluster.setDataNodeDead(dnToStop);
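+      // Expiring the DN's heartbeat on the NameNode makes the block
+      // under-replicated, triggering EC reconstruction.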
+
+      // Wait for reconstruction to bring the group back to groupSize internal blocks.
+      StripedFileTestUtil.waitForReconstructionFinished(filePath, fs, groupSize);
+      blks = fs.getClient().getLocatedBlocks(filePath.toString(), 0);
+      block = (LocatedStripedBlock) blks.getLastLocatedBlock();
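+      // All block indices [0, groupSize) must be present again, i.e. the
+      // lost internal block was reconstructed on a remaining node.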
+      BitSet bitSet = new BitSet(groupSize);
+      for (byte index : block.getBlockIndices()) {
+        bitSet.set(index);
+      }
+      for (int i = 0; i < groupSize; i++) {
+        Assert.assertTrue(bitSet.get(i));
+      }
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
 }