From 5e430d5a11a6b018de13ededaad3113e83ec263c Mon Sep 17 00:00:00 2001 From: Aravindan Vijayan Date: Wed, 11 Dec 2019 12:19:51 -0800 Subject: [PATCH 1/5] HDFS-14989. Add a 'swapBlockList' operation to Namenode. --- .../java/org/apache/hadoop/fs/Options.java | 23 ++ .../hdfs/server/namenode/FSNamesystem.java | 32 +++ .../hdfs/server/namenode/INodeFile.java | 24 ++ .../server/namenode/NameNodeRpcServer.java | 12 + .../hdfs/server/namenode/SwapBlockListOp.java | 176 ++++++++++++ .../server/namenode/TestSwapBlockList.java | 258 ++++++++++++++++++ 6 files changed, 525 insertions(+) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SwapBlockListOp.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSwapBlockList.java diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java index 75bc12df8fdcf..1a2c48c360b04 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java @@ -518,4 +518,27 @@ public enum ChecksumCombineMode { MD5MD5CRC, // MD5 of block checksums, which are MD5 over chunk CRCs COMPOSITE_CRC // Block/chunk-independent composite CRC } + + /** + * Enum to support the varargs for swapBlockList() options + */ + public enum SwapBlockList { + NONE((byte) 0), // No options, swap the block list. + ONE_WAY_BLOCK_SWAP((byte) 1), // Skip replacing source with dst info. + EXCLUDE_BLOCK_LAYOUT_HEADER_SWAP((byte) 2); // Exclude block layout header swap + + private final byte code; + + private SwapBlockList(byte code) { + this.code = code; + } + + public static SwapBlockList valueOf(byte code) { + return code < 0 || code >= values().length ? null : values()[code]; + } + + public byte value() { + return code; + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 8d1884e41efe1..7e9422c1d225f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -106,6 +106,7 @@ import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; import org.apache.hadoop.hdfs.server.common.ECTopologyVerifier; +import org.apache.hadoop.hdfs.server.namenode.SwapBlockListOp.SwapBlockListResult; import org.apache.hadoop.hdfs.server.namenode.metrics.ReplicatedBlocksMBean; import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports; import org.apache.hadoop.util.Time; @@ -8272,5 +8273,36 @@ public void checkErasureCodingSupported(String operationName) throw new UnsupportedActionException(operationName + " not supported."); } } + + /** + * Namesystem API to swap block list between source and destination files. + * + * @param src source file. + * @param dst destination file. + * @throws IOException on Error. + */ + boolean swapBlockList(final String src, final String dst, + Options.SwapBlockList... 
options) + throws IOException { + final String operationName = "swapBlockList"; + checkOperation(OperationCategory.WRITE); + final FSPermissionChecker pc = getPermissionChecker(); + SwapBlockListResult res = null; + try { + writeLock(); + try { + checkOperation(OperationCategory.WRITE); + checkNameNodeSafeMode("Cannot swap block list." + src + ", " + dst); + res = SwapBlockListOp.swapBlocks(dir, pc, src, dst, options); + } finally { + writeUnlock(operationName); + } + } catch (AccessControlException e) { + logAuditEvent(false, operationName, src, dst, null); + throw e; + } + logAuditEvent(true, operationName, src, dst, res.dstFileAuditStat); + return res.success; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java index ce654b789f31b..52988197d17b7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java @@ -163,6 +163,10 @@ static byte getStoragePolicyID(long header) { return (byte)STORAGE_POLICY_ID.BITS.retrieve(header); } + static byte getBlockLayoutPolicy(long header) { + return (byte)BLOCK_LAYOUT_AND_REDUNDANCY.BITS.retrieve(header); + } + // Union of all the block type masks. Currently there is only // BLOCK_TYPE_MASK_STRIPED static final long BLOCK_TYPE_MASK = 1 << 11; @@ -728,6 +732,15 @@ public void clearBlocks() { this.blocks = BlockInfo.EMPTY_ARRAY; } + /** + * This method replaces blocks in a file with the supplied blocks. Make sure + * you know what you are doing when you are calling this! + * @param newBlocks List of new blocks. + */ + void replaceBlocks(BlockInfo[] newBlocks) { + this.blocks = Arrays.copyOf(newBlocks, newBlocks.length); + } + private void updateRemovedUnderConstructionFiles( ReclaimContext reclaimContext) { if (isUnderConstruction() && reclaimContext.removedUCFiles != null) { @@ -1219,4 +1232,15 @@ boolean isBlockInLatestSnapshot(BlockInfo block) { return snapshotBlocks != null && Arrays.asList(snapshotBlocks).contains(block); } + + /** + * Update Header with new Block Layout and Redundancy bits. + * @param newBlockLayoutPolicy new block layout policy. + */ + void updateHeaderWithNewBlockLayoutPolicy(byte newBlockLayoutPolicy) { + this.header = HeaderFormat.toLong( + HeaderFormat.getPreferredBlockSize(header), + newBlockLayoutPolicy, + HeaderFormat.getStoragePolicyID(header)); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index e4839612e76a4..b6e9dddc66dc2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -2637,4 +2637,16 @@ public Long getNextSPSPath() throws IOException { } return namesystem.getBlockManager().getSPSManager().getNextPathId(); } + + public boolean swapBlockList(String src, String dst, + Options.SwapBlockList... 
options) + throws IOException { + checkNNStartup(); + if (stateChangeLog.isDebugEnabled()) { + stateChangeLog.debug("*DIR* NameNode.swapBlockList: {} and {}", src, dst); + } + namesystem.checkOperation(OperationCategory.WRITE); + return namesystem.swapBlockList(src, dst); + } + } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SwapBlockListOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SwapBlockListOp.java new file mode 100644 index 0000000000000..e6fc49c7b899a --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SwapBlockListOp.java @@ -0,0 +1,176 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.server.namenode; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.util.Arrays; + +import org.apache.hadoop.fs.FileAlreadyExistsException; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.Options; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; +import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp; +import org.apache.hadoop.hdfs.server.namenode.INodeFile.HeaderFormat; +import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; +import org.apache.hadoop.util.Time; + +/** + * Class to carry out the operation of swapping blocks from one file to another. + * Along with swapping blocks, we can also optionally swap the block layout + * of a file header, which is useful for client operations like converting + * replicated to EC file. + */ +public class SwapBlockListOp { + + static SwapBlockListResult swapBlocks(FSDirectory fsd, FSPermissionChecker pc, + String src, String dst, + Options.SwapBlockList... options) throws IOException { + + final INodesInPath srcIIP = fsd.resolvePath(pc, src, DirOp.WRITE); + final INodesInPath dstIIP = fsd.resolvePath(pc, dst, DirOp.WRITE); + if (fsd.isPermissionEnabled()) { + fsd.checkAncestorAccess(pc, srcIIP, FsAction.WRITE); + fsd.checkAncestorAccess(pc, dstIIP, FsAction.WRITE); + } + if (NameNode.stateChangeLog.isDebugEnabled()) { + NameNode.stateChangeLog.debug("DIR* FSDirectory.swapBlockList: " + + srcIIP.getPath() + " and " + dstIIP.getPath()); + } + SwapBlockListResult result = null; + fsd.writeLock(); + try { + result = swapBlockList(fsd, srcIIP, dstIIP, options); + } finally { + fsd.writeUnlock(); + } + return result; + } + + private static SwapBlockListResult swapBlockList(FSDirectory fsd, + final INodesInPath srcIIP, + final INodesInPath dstIIP, + Options.SwapBlockList... 
options) + throws IOException { + + assert fsd.hasWriteLock(); + validateInode(fsd, srcIIP); + validateInode(fsd, dstIIP); + fsd.ezManager.checkMoveValidity(srcIIP, dstIIP); + + final String src = srcIIP.getPath(); + final String dst = dstIIP.getPath(); + if (dst.equals(src)) { + throw new FileAlreadyExistsException("The source " + src + + " and destination " + dst + " are the same"); + } + + INodeFile srcINodeFile = (INodeFile) srcIIP.getLastINode(); + INodeFile dstINodeFile = (INodeFile) dstIIP.getLastINode(); + + long mtime = Time.now(); + BlockInfo[] dstINodeFileBlocks = dstINodeFile.getBlocks(); + dstINodeFile.replaceBlocks(srcINodeFile.getBlocks()); + + boolean overwrite = options != null + && Arrays.asList(options).contains( + Options.SwapBlockList.ONE_WAY_BLOCK_SWAP); + if (!overwrite) { + srcINodeFile.replaceBlocks(dstINodeFileBlocks); + } + + boolean excludeHeader = options != null && + Arrays.asList(options).contains( + Options.SwapBlockList.EXCLUDE_BLOCK_LAYOUT_HEADER_SWAP); + if (!excludeHeader) { + long srcHeader = srcINodeFile.getHeaderLong(); + long dstHeader = dstINodeFile.getHeaderLong(); + + byte srcBlockLayoutPolicy = + HeaderFormat.getBlockLayoutPolicy(srcHeader); + dstINodeFile.updateHeaderWithNewBlockLayoutPolicy(srcBlockLayoutPolicy); + + if (!overwrite) { + byte dstBlockLayoutPolicy = + HeaderFormat.getBlockLayoutPolicy(dstHeader); + srcINodeFile.updateHeaderWithNewBlockLayoutPolicy(dstBlockLayoutPolicy); + srcINodeFile.setModificationTime(mtime); + } + } + // Update modification time. + dstINodeFile.setModificationTime(mtime); + + return new SwapBlockListResult(true, + fsd.getAuditFileInfo(srcIIP), + fsd.getAuditFileInfo(dstIIP)); + } + + private static void validateInode(FSDirectory fsd, INodesInPath srcIIP) + throws IOException { + + String errorPrefix = "DIR* FSDirectory.swapBlockList: "; + String error = "Swap Block List input "; + final INode srcInode = srcIIP.getLastINode(); + + // Check if INode is null. + if (srcInode == null) { + error += srcIIP.getPath() + " is not found."; + NameNode.stateChangeLog.warn(errorPrefix + error); + throw new FileNotFoundException(error); + } + + // Check if INode is a file and NOT a directory. + if (!srcInode.isFile()) { + error += srcIIP.getPath() + " is not a file."; + NameNode.stateChangeLog.warn(errorPrefix + error); + throw new IOException(error); + } + + // Check if file is under construction. + INodeFile iNodeFile = (INodeFile) srcIIP.getLastINode(); + if (iNodeFile.isUnderConstruction()) { + error += srcIIP.getPath() + " is under construction."; + NameNode.stateChangeLog.warn(errorPrefix + error); + throw new IOException(error); + } + + // Check if any parent directory is in a snapshot. 
+ if (srcIIP.getLatestSnapshotId() != Snapshot.CURRENT_STATE_ID) { + error += srcIIP.getPath() + " is in a snapshot directory."; + NameNode.stateChangeLog.warn(errorPrefix + error); + throw new IOException(error); + } + + } + + static class SwapBlockListResult { + final boolean success; + final FileStatus srcFileAuditStat; + final FileStatus dstFileAuditStat; + + SwapBlockListResult(boolean success, + FileStatus srcFileAuditStat, + FileStatus dstFileAuditStat) { + this.success = success; + this.srcFileAuditStat = srcFileAuditStat; + this.dstFileAuditStat = dstFileAuditStat; + } + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSwapBlockList.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSwapBlockList.java new file mode 100644 index 0000000000000..a8e3385778197 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSwapBlockList.java @@ -0,0 +1,258 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hdfs.server.namenode; + +import java.io.FileNotFoundException; +import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Options; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DFSTestUtil; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; +import org.apache.hadoop.hdfs.server.namenode.INodeFile.HeaderFormat; +import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +public class TestSwapBlockList { + + private static final short REPLICATION = 3; + + private static final long seed = 0; + private final Path rootDir = new Path("/" + getClass().getSimpleName()); + + private final Path subDir1 = new Path(rootDir, "dir1"); + private final Path file1 = new Path(subDir1, "file1"); + private final Path file2 = new Path(subDir1, "file2"); + + private final Path subDir11 = new Path(subDir1, "dir11"); + private final Path file3 = new Path(subDir11, "file3"); + + private final Path subDir2 = new Path(rootDir, "dir2"); + private final Path file4 = new Path(subDir2, "file4"); + + private Configuration conf; + private MiniDFSCluster cluster; + private FSNamesystem fsn; + private FSDirectory fsdir; + + private DistributedFileSystem hdfs; + + @Before + public void setUp() throws Exception { + conf = new Configuration(); + conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY, 2); + cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(REPLICATION) + .build(); + cluster.waitActive(); + + fsn = cluster.getNamesystem(); + fsdir = fsn.getFSDirectory(); + + hdfs = cluster.getFileSystem(); + + hdfs.mkdirs(subDir2); + + DFSTestUtil.createFile(hdfs, file1, 1024, REPLICATION, seed); + DFSTestUtil.createFile(hdfs, file2, 1024, REPLICATION, seed); + DFSTestUtil.createFile(hdfs, file3, 1024, REPLICATION, seed); + DFSTestUtil.createFile(hdfs, file4, 1024, REPLICATION, seed); + + } + + @After + public void tearDown() throws Exception { + if (cluster != null) { + cluster.shutdown(); + } + } + + @Test + public void testInputValidation() throws Exception { + + // Source file not found. + try { + fsn.swapBlockList("/TestSwapBlockList/dir1/fileXYZ", + "/TestSwapBlockList/dir1/dir11/file3"); + Assert.fail(); + } catch (IOException ioEx) { + Assert.assertTrue(ioEx instanceof FileNotFoundException); + Assert.assertTrue( + ioEx.getMessage().contains("/TestSwapBlockList/dir1/fileXYZ")); + } + + // Destination file not found. + try { + fsn.swapBlockList("/TestSwapBlockList/dir1/file1", + "/TestSwapBlockList/dir1/dir11/fileXYZ"); + Assert.fail(); + } catch (IOException ioEx) { + Assert.assertTrue(ioEx instanceof FileNotFoundException); + Assert.assertTrue( + ioEx.getMessage().contains("/TestSwapBlockList/dir1/dir11/fileXYZ")); + } + + // Source is Directory, not a file. + try { + fsn.swapBlockList("/TestSwapBlockList/dir1", + "/TestSwapBlockList/dir1/dir11/file3"); + Assert.fail(); + } catch (IOException ioEx) { + Assert.assertTrue( + ioEx.getMessage().contains("/TestSwapBlockList/dir1 is not a file.")); + } + + String sourceFile = "/TestSwapBlockList/dir1/file1"; + String dstFile = "/TestSwapBlockList/dir1/dir11/file3"; + + // Destination file is under construction. 
+ INodeFile dstInodeFile = + (INodeFile) fsdir.resolvePath(fsdir.getPermissionChecker(), + dstFile, FSDirectory.DirOp.WRITE).getLastINode(); + dstInodeFile.toUnderConstruction("TestClient", "TestClientMachine"); + try { + fsn.swapBlockList(sourceFile, dstFile); + Assert.fail(); + } catch (IOException ioEx) { + Assert.assertTrue( + ioEx.getMessage().contains(dstFile + " is under construction.")); + } + + // Check if parent directory is in snapshot. + SnapshotTestHelper.createSnapshot(hdfs, subDir2, "s0"); + dstFile = "/TestSwapBlockList/dir2/file4"; + try { + fsn.swapBlockList(sourceFile, dstFile); + Assert.fail(); + } catch (IOException ioEx) { + Assert.assertTrue( + ioEx.getMessage().contains(dstFile + " is in a snapshot directory.")); + } + } + + @Test + public void testSwapBlockListOp() throws Exception { + String sourceFile = "/TestSwapBlockList/dir1/file1"; + String dstFile = "/TestSwapBlockList/dir1/dir11/file3"; + + INodeFile srcInodeFile = + (INodeFile) fsdir.resolvePath(fsdir.getPermissionChecker(), + sourceFile, FSDirectory.DirOp.WRITE).getLastINode(); + INodeFile dstInodeFile = + (INodeFile) fsdir.resolvePath(fsdir.getPermissionChecker(), + dstFile, FSDirectory.DirOp.WRITE).getLastINode(); + + BlockInfo[] srcBlockLocationsBeforeSwap = srcInodeFile.getBlocks(); + long srcHeader = srcInodeFile.getHeaderLong(); + + BlockInfo[] dstBlockLocationsBeforeSwap = dstInodeFile.getBlocks(); + long dstHeader = dstInodeFile.getHeaderLong(); + + fsn.swapBlockList(sourceFile, dstFile); + assertBlockListEquality(dstBlockLocationsBeforeSwap, + srcInodeFile.getBlocks()); + assertBlockListEquality(srcBlockLocationsBeforeSwap, + dstInodeFile.getBlocks()); + + // Assert Block Layout + Assert.assertEquals(HeaderFormat.getBlockLayoutPolicy(srcHeader), + HeaderFormat.getBlockLayoutPolicy(dstInodeFile.getHeaderLong())); + Assert.assertEquals(HeaderFormat.getBlockLayoutPolicy(dstHeader), + HeaderFormat.getBlockLayoutPolicy(srcInodeFile.getHeaderLong())); + } + + @Test + public void testSwapBlockListOpOneWay() throws Exception { + String sourceFile = "/TestSwapBlockList/dir1/file1"; + String dstFile = "/TestSwapBlockList/dir1/dir11/file3"; + + INodeFile srcInodeFile = + (INodeFile) fsdir.resolvePath(fsdir.getPermissionChecker(), + sourceFile, FSDirectory.DirOp.WRITE).getLastINode(); + INodeFile dstInodeFile = + (INodeFile) fsdir.resolvePath(fsdir.getPermissionChecker(), + dstFile, FSDirectory.DirOp.WRITE).getLastINode(); + + BlockInfo[] srcBlockLocationsBeforeSwap = srcInodeFile.getBlocks(); + long srcHeader = srcInodeFile.getHeaderLong(); + + fsn.swapBlockList(sourceFile, dstFile, + Options.SwapBlockList.ONE_WAY_BLOCK_SWAP); + assertBlockListEquality(srcBlockLocationsBeforeSwap, + dstInodeFile.getBlocks()); + assertBlockListEquality(srcBlockLocationsBeforeSwap, + srcInodeFile.getBlocks()); + + // Assert Block Layout + Assert.assertEquals(HeaderFormat.getBlockLayoutPolicy(srcHeader), + HeaderFormat.getBlockLayoutPolicy(dstInodeFile.getHeaderLong())); + Assert.assertEquals(HeaderFormat.getBlockLayoutPolicy(srcHeader), + HeaderFormat.getBlockLayoutPolicy(srcInodeFile.getHeaderLong())); + } + + @Test + public void testSwapBlockListOpRollback() throws Exception { + // Invoke swap twice and make sure the blocks are back to their original + // file. 
+ String sourceFile = "/TestSwapBlockList/dir1/file1"; + String dstFile = "/TestSwapBlockList/dir1/dir11/file3"; + + INodeFile srcInodeFile = + (INodeFile) fsdir.resolvePath(fsdir.getPermissionChecker(), + sourceFile, FSDirectory.DirOp.WRITE).getLastINode(); + INodeFile dstInodeFile = + (INodeFile) fsdir.resolvePath(fsdir.getPermissionChecker(), + dstFile, FSDirectory.DirOp.WRITE).getLastINode(); + + BlockInfo[] srcBlockLocationsBeforeSwap = srcInodeFile.getBlocks(); + long srcHeader = srcInodeFile.getHeaderLong(); + + BlockInfo[] dstBlockLocationsBeforeSwap = dstInodeFile.getBlocks(); + long dstHeader = dstInodeFile.getHeaderLong(); + + testSwapBlockListOp(); + testSwapBlockListOp(); + + assertBlockListEquality(dstBlockLocationsBeforeSwap, + dstInodeFile.getBlocks()); + assertBlockListEquality(srcBlockLocationsBeforeSwap, + srcInodeFile.getBlocks()); + + // Assert Block Layout + Assert.assertEquals(HeaderFormat.getBlockLayoutPolicy(srcHeader), + HeaderFormat.getBlockLayoutPolicy(srcInodeFile.getHeaderLong())); + Assert.assertEquals(HeaderFormat.getBlockLayoutPolicy(dstHeader), + HeaderFormat.getBlockLayoutPolicy(dstInodeFile.getHeaderLong())); + } + + private void assertBlockListEquality(BlockInfo[] expected, + BlockInfo[] actual) { + Assert.assertEquals(expected.length, actual.length); + for (int i = 0; i < expected.length; i++) { + Assert.assertEquals(expected[i].getBlockId(), actual[i].getBlockId()); + } + } +} From aba3317b2352683d07996f6ed4df8ce0efe11e6b Mon Sep 17 00:00:00 2001 From: Aravindan Vijayan Date: Thu, 12 Dec 2019 10:16:07 -0800 Subject: [PATCH 2/5] HDFS-14989. Add a 'swapBlockList' operation to Namenode. (Fix checkstyle issues) --- .../java/org/apache/hadoop/fs/Options.java | 7 +++--- .../hdfs/server/namenode/FSNamesystem.java | 4 ++-- .../hdfs/server/namenode/SwapBlockListOp.java | 23 +++++++++++++++---- .../server/namenode/TestSwapBlockList.java | 13 +++++++---- 4 files changed, 33 insertions(+), 14 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java index 1a2c48c360b04..9f5898af279bd 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java @@ -520,16 +520,17 @@ public enum ChecksumCombineMode { } /** - * Enum to support the varargs for swapBlockList() options + * Enum to support the varargs for swapBlockList() options. */ public enum SwapBlockList { NONE((byte) 0), // No options, swap the block list. ONE_WAY_BLOCK_SWAP((byte) 1), // Skip replacing source with dst info. 
- EXCLUDE_BLOCK_LAYOUT_HEADER_SWAP((byte) 2); // Exclude block layout header swap + EXCLUDE_BLOCK_LAYOUT_HEADER_SWAP((byte) 2); + // Exclude block layout header swap private final byte code; - private SwapBlockList(byte code) { + SwapBlockList(byte code) { this.code = code; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 7e9422c1d225f..b9a6469c4ef6e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -8301,8 +8301,8 @@ boolean swapBlockList(final String src, final String dst, logAuditEvent(false, operationName, src, dst, null); throw e; } - logAuditEvent(true, operationName, src, dst, res.dstFileAuditStat); - return res.success; + logAuditEvent(true, operationName, src, dst, res.getDstFileAuditStat()); + return res.isSuccess(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SwapBlockListOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SwapBlockListOp.java index e6fc49c7b899a..370ccf23e8dc6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SwapBlockListOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SwapBlockListOp.java @@ -38,7 +38,10 @@ * of a file header, which is useful for client operations like converting * replicated to EC file. */ -public class SwapBlockListOp { +public final class SwapBlockListOp { + + private SwapBlockListOp() { + } static SwapBlockListResult swapBlocks(FSDirectory fsd, FSPermissionChecker pc, String src, String dst, @@ -161,9 +164,9 @@ private static void validateInode(FSDirectory fsd, INodesInPath srcIIP) } static class SwapBlockListResult { - final boolean success; - final FileStatus srcFileAuditStat; - final FileStatus dstFileAuditStat; + private final boolean success; + private final FileStatus srcFileAuditStat; + private final FileStatus dstFileAuditStat; SwapBlockListResult(boolean success, FileStatus srcFileAuditStat, @@ -172,5 +175,17 @@ static class SwapBlockListResult { this.srcFileAuditStat = srcFileAuditStat; this.dstFileAuditStat = dstFileAuditStat; } + + public boolean isSuccess() { + return success; + } + + public FileStatus getDstFileAuditStat() { + return dstFileAuditStat; + } + + public FileStatus getSrcFileAuditStat() { + return srcFileAuditStat; + } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSwapBlockList.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSwapBlockList.java index a8e3385778197..6801adc32e076 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSwapBlockList.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSwapBlockList.java @@ -36,11 +36,14 @@ import org.junit.Before; import org.junit.Test; +/** + * Test SwapBlockListOp working. 
+ */ public class TestSwapBlockList { private static final short REPLICATION = 3; - private static final long seed = 0; + private static final long SEED = 0; private final Path rootDir = new Path("/" + getClass().getSimpleName()); private final Path subDir1 = new Path(rootDir, "dir1"); @@ -76,10 +79,10 @@ public void setUp() throws Exception { hdfs.mkdirs(subDir2); - DFSTestUtil.createFile(hdfs, file1, 1024, REPLICATION, seed); - DFSTestUtil.createFile(hdfs, file2, 1024, REPLICATION, seed); - DFSTestUtil.createFile(hdfs, file3, 1024, REPLICATION, seed); - DFSTestUtil.createFile(hdfs, file4, 1024, REPLICATION, seed); + DFSTestUtil.createFile(hdfs, file1, 1024, REPLICATION, SEED); + DFSTestUtil.createFile(hdfs, file2, 1024, REPLICATION, SEED); + DFSTestUtil.createFile(hdfs, file3, 1024, REPLICATION, SEED); + DFSTestUtil.createFile(hdfs, file4, 1024, REPLICATION, SEED); } From 18f3be798bf793e551be118f696025389cfe3a47 Mon Sep 17 00:00:00 2001 From: Aravindan Vijayan Date: Wed, 18 Dec 2019 14:57:02 -0800 Subject: [PATCH 3/5] HDFS-14989. Address review comments. --- .../java/org/apache/hadoop/fs/Options.java | 24 -------- .../hdfs/server/namenode/FSNamesystem.java | 5 +- .../hdfs/server/namenode/INodeFile.java | 7 ++- .../server/namenode/NameNodeRpcServer.java | 3 +- .../hdfs/server/namenode/SwapBlockListOp.java | 53 ++++++------------ .../server/namenode/TestSwapBlockList.java | 56 +++++-------------- 6 files changed, 41 insertions(+), 107 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java index 9f5898af279bd..75bc12df8fdcf 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java @@ -518,28 +518,4 @@ public enum ChecksumCombineMode { MD5MD5CRC, // MD5 of block checksums, which are MD5 over chunk CRCs COMPOSITE_CRC // Block/chunk-independent composite CRC } - - /** - * Enum to support the varargs for swapBlockList() options. - */ - public enum SwapBlockList { - NONE((byte) 0), // No options, swap the block list. - ONE_WAY_BLOCK_SWAP((byte) 1), // Skip replacing source with dst info. - EXCLUDE_BLOCK_LAYOUT_HEADER_SWAP((byte) 2); - // Exclude block layout header swap - - private final byte code; - - SwapBlockList(byte code) { - this.code = code; - } - - public static SwapBlockList valueOf(byte code) { - return code < 0 || code >= values().length ? null : values()[code]; - } - - public byte value() { - return code; - } - } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index c0cef94bde1f8..92720f0d3cebf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -8282,8 +8282,7 @@ public void checkErasureCodingSupported(String operationName) * @param dst destination file. * @throws IOException on Error. */ - boolean swapBlockList(final String src, final String dst, - Options.SwapBlockList... 
options) + boolean swapBlockList(final String src, final String dst) throws IOException { final String operationName = "swapBlockList"; checkOperation(OperationCategory.WRITE); @@ -8294,7 +8293,7 @@ boolean swapBlockList(final String src, final String dst, try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot swap block list." + src + ", " + dst); - res = SwapBlockListOp.swapBlocks(dir, pc, src, dst, options); + res = SwapBlockListOp.swapBlocks(dir, pc, src, dst); } finally { writeUnlock(operationName); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java index 52988197d17b7..9be852ef87cb3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java @@ -733,12 +733,15 @@ public void clearBlocks() { } /** - * This method replaces blocks in a file with the supplied blocks. Make sure - * you know what you are doing when you are calling this! + * This method replaces blocks in a file with the supplied blocks. * @param newBlocks List of new blocks. + * @param newId new block collection id. */ void replaceBlocks(BlockInfo[] newBlocks) { this.blocks = Arrays.copyOf(newBlocks, newBlocks.length); + for (BlockInfo block : blocks) { + block.setBlockCollectionId(getId()); + } } private void updateRemovedUnderConstructionFiles( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index b6e9dddc66dc2..0b140890ef52a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -2638,8 +2638,7 @@ public Long getNextSPSPath() throws IOException { return namesystem.getBlockManager().getSPSManager().getNextPathId(); } - public boolean swapBlockList(String src, String dst, - Options.SwapBlockList... options) + public boolean swapBlockList(String src, String dst) throws IOException { checkNNStartup(); if (stateChangeLog.isDebugEnabled()) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SwapBlockListOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SwapBlockListOp.java index 370ccf23e8dc6..f99acfd140a17 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SwapBlockListOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SwapBlockListOp.java @@ -20,11 +20,9 @@ import java.io.FileNotFoundException; import java.io.IOException; -import java.util.Arrays; import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.Options; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp; @@ -44,8 +42,7 @@ private SwapBlockListOp() { } static SwapBlockListResult swapBlocks(FSDirectory fsd, FSPermissionChecker pc, - String src, String dst, - Options.SwapBlockList... 
options) throws IOException { + String src, String dst) throws IOException { final INodesInPath srcIIP = fsd.resolvePath(pc, src, DirOp.WRITE); final INodesInPath dstIIP = fsd.resolvePath(pc, dst, DirOp.WRITE); @@ -60,7 +57,7 @@ static SwapBlockListResult swapBlocks(FSDirectory fsd, FSPermissionChecker pc, SwapBlockListResult result = null; fsd.writeLock(); try { - result = swapBlockList(fsd, srcIIP, dstIIP, options); + result = swapBlockList(fsd, srcIIP, dstIIP); } finally { fsd.writeUnlock(); } @@ -69,13 +66,12 @@ static SwapBlockListResult swapBlocks(FSDirectory fsd, FSPermissionChecker pc, private static SwapBlockListResult swapBlockList(FSDirectory fsd, final INodesInPath srcIIP, - final INodesInPath dstIIP, - Options.SwapBlockList... options) + final INodesInPath dstIIP) throws IOException { assert fsd.hasWriteLock(); - validateInode(fsd, srcIIP); - validateInode(fsd, dstIIP); + validateInode(srcIIP); + validateInode(dstIIP); fsd.ezManager.checkMoveValidity(srcIIP, dstIIP); final String src = srcIIP.getPath(); @@ -91,41 +87,28 @@ private static SwapBlockListResult swapBlockList(FSDirectory fsd, long mtime = Time.now(); BlockInfo[] dstINodeFileBlocks = dstINodeFile.getBlocks(); dstINodeFile.replaceBlocks(srcINodeFile.getBlocks()); + srcINodeFile.replaceBlocks(dstINodeFileBlocks); - boolean overwrite = options != null - && Arrays.asList(options).contains( - Options.SwapBlockList.ONE_WAY_BLOCK_SWAP); - if (!overwrite) { - srcINodeFile.replaceBlocks(dstINodeFileBlocks); - } + long srcHeader = srcINodeFile.getHeaderLong(); + long dstHeader = dstINodeFile.getHeaderLong(); - boolean excludeHeader = options != null && - Arrays.asList(options).contains( - Options.SwapBlockList.EXCLUDE_BLOCK_LAYOUT_HEADER_SWAP); - if (!excludeHeader) { - long srcHeader = srcINodeFile.getHeaderLong(); - long dstHeader = dstINodeFile.getHeaderLong(); - - byte srcBlockLayoutPolicy = - HeaderFormat.getBlockLayoutPolicy(srcHeader); - dstINodeFile.updateHeaderWithNewBlockLayoutPolicy(srcBlockLayoutPolicy); - - if (!overwrite) { - byte dstBlockLayoutPolicy = - HeaderFormat.getBlockLayoutPolicy(dstHeader); - srcINodeFile.updateHeaderWithNewBlockLayoutPolicy(dstBlockLayoutPolicy); - srcINodeFile.setModificationTime(mtime); - } - } - // Update modification time. 
+ byte dstBlockLayoutPolicy = + HeaderFormat.getBlockLayoutPolicy(dstHeader); + byte srcBlockLayoutPolicy = + HeaderFormat.getBlockLayoutPolicy(srcHeader); + + dstINodeFile.updateHeaderWithNewBlockLayoutPolicy(srcBlockLayoutPolicy); dstINodeFile.setModificationTime(mtime); + srcINodeFile.updateHeaderWithNewBlockLayoutPolicy(dstBlockLayoutPolicy); + srcINodeFile.setModificationTime(mtime); + return new SwapBlockListResult(true, fsd.getAuditFileInfo(srcIIP), fsd.getAuditFileInfo(dstIIP)); } - private static void validateInode(FSDirectory fsd, INodesInPath srcIIP) + private static void validateInode(INodesInPath srcIIP) throws IOException { String errorPrefix = "DIR* FSDirectory.swapBlockList: "; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSwapBlockList.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSwapBlockList.java index 6801adc32e076..84881c3562372 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSwapBlockList.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSwapBlockList.java @@ -18,11 +18,12 @@ package org.apache.hadoop.hdfs.server.namenode; +import static org.junit.Assert.assertEquals; + import java.io.FileNotFoundException; import java.io.IOException; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.Options; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; @@ -176,43 +177,14 @@ public void testSwapBlockListOp() throws Exception { fsn.swapBlockList(sourceFile, dstFile); assertBlockListEquality(dstBlockLocationsBeforeSwap, - srcInodeFile.getBlocks()); - assertBlockListEquality(srcBlockLocationsBeforeSwap, - dstInodeFile.getBlocks()); - - // Assert Block Layout - Assert.assertEquals(HeaderFormat.getBlockLayoutPolicy(srcHeader), - HeaderFormat.getBlockLayoutPolicy(dstInodeFile.getHeaderLong())); - Assert.assertEquals(HeaderFormat.getBlockLayoutPolicy(dstHeader), - HeaderFormat.getBlockLayoutPolicy(srcInodeFile.getHeaderLong())); - } - - @Test - public void testSwapBlockListOpOneWay() throws Exception { - String sourceFile = "/TestSwapBlockList/dir1/file1"; - String dstFile = "/TestSwapBlockList/dir1/dir11/file3"; - - INodeFile srcInodeFile = - (INodeFile) fsdir.resolvePath(fsdir.getPermissionChecker(), - sourceFile, FSDirectory.DirOp.WRITE).getLastINode(); - INodeFile dstInodeFile = - (INodeFile) fsdir.resolvePath(fsdir.getPermissionChecker(), - dstFile, FSDirectory.DirOp.WRITE).getLastINode(); - - BlockInfo[] srcBlockLocationsBeforeSwap = srcInodeFile.getBlocks(); - long srcHeader = srcInodeFile.getHeaderLong(); - - fsn.swapBlockList(sourceFile, dstFile, - Options.SwapBlockList.ONE_WAY_BLOCK_SWAP); - assertBlockListEquality(srcBlockLocationsBeforeSwap, - dstInodeFile.getBlocks()); + srcInodeFile.getBlocks(), srcInodeFile.getId()); assertBlockListEquality(srcBlockLocationsBeforeSwap, - srcInodeFile.getBlocks()); + dstInodeFile.getBlocks(), dstInodeFile.getId()); // Assert Block Layout - Assert.assertEquals(HeaderFormat.getBlockLayoutPolicy(srcHeader), + assertEquals(HeaderFormat.getBlockLayoutPolicy(srcHeader), HeaderFormat.getBlockLayoutPolicy(dstInodeFile.getHeaderLong())); - Assert.assertEquals(HeaderFormat.getBlockLayoutPolicy(srcHeader), + assertEquals(HeaderFormat.getBlockLayoutPolicy(dstHeader), HeaderFormat.getBlockLayoutPolicy(srcInodeFile.getHeaderLong())); } @@ -240,22 
+212,24 @@ public void testSwapBlockListOpRollback() throws Exception { testSwapBlockListOp(); assertBlockListEquality(dstBlockLocationsBeforeSwap, - dstInodeFile.getBlocks()); + dstInodeFile.getBlocks(), dstInodeFile.getId()); assertBlockListEquality(srcBlockLocationsBeforeSwap, - srcInodeFile.getBlocks()); + srcInodeFile.getBlocks(), srcInodeFile.getId()); // Assert Block Layout - Assert.assertEquals(HeaderFormat.getBlockLayoutPolicy(srcHeader), + assertEquals(HeaderFormat.getBlockLayoutPolicy(srcHeader), HeaderFormat.getBlockLayoutPolicy(srcInodeFile.getHeaderLong())); - Assert.assertEquals(HeaderFormat.getBlockLayoutPolicy(dstHeader), + assertEquals(HeaderFormat.getBlockLayoutPolicy(dstHeader), HeaderFormat.getBlockLayoutPolicy(dstInodeFile.getHeaderLong())); } private void assertBlockListEquality(BlockInfo[] expected, - BlockInfo[] actual) { - Assert.assertEquals(expected.length, actual.length); + BlockInfo[] actual, + long expectedId) { + assertEquals(expected.length, actual.length); for (int i = 0; i < expected.length; i++) { - Assert.assertEquals(expected[i].getBlockId(), actual[i].getBlockId()); + assertEquals(expected[i].getBlockId(), actual[i].getBlockId()); + assertEquals(expectedId, actual[i].getBlockCollectionId()); } } } From f0d54f63ca0572fa89501771ef60c4cd75009698 Mon Sep 17 00:00:00 2001 From: Aravindan Vijayan Date: Sat, 4 Jan 2020 15:59:34 -0800 Subject: [PATCH 4/5] HDFS-14989. Swap storage policy ID, check destination file genStamp. --- .../hdfs/server/namenode/FSNamesystem.java | 4 +-- .../hdfs/server/namenode/INodeFile.java | 7 ++-- .../server/namenode/NameNodeRpcServer.java | 4 +-- .../hdfs/server/namenode/SwapBlockListOp.java | 28 +++++++++++---- .../server/namenode/TestSwapBlockList.java | 35 +++++++++++++++---- 5 files changed, 59 insertions(+), 19 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 92720f0d3cebf..e569a8e67b348 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -8282,7 +8282,7 @@ public void checkErasureCodingSupported(String operationName) * @param dst destination file. * @throws IOException on Error. */ - boolean swapBlockList(final String src, final String dst) + boolean swapBlockList(final String src, final String dst, long genTimestamp) throws IOException { final String operationName = "swapBlockList"; checkOperation(OperationCategory.WRITE); @@ -8293,7 +8293,7 @@ boolean swapBlockList(final String src, final String dst) try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot swap block list." 
+ src + ", " + dst); - res = SwapBlockListOp.swapBlocks(dir, pc, src, dst); + res = SwapBlockListOp.swapBlocks(dir, pc, src, dst, genTimestamp); } finally { writeUnlock(operationName); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java index 9be852ef87cb3..abac3aebbe370 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java @@ -735,7 +735,6 @@ public void clearBlocks() { /** * This method replaces blocks in a file with the supplied blocks. * @param newBlocks List of new blocks. - * @param newId new block collection id. */ void replaceBlocks(BlockInfo[] newBlocks) { this.blocks = Arrays.copyOf(newBlocks, newBlocks.length); @@ -1239,11 +1238,13 @@ boolean isBlockInLatestSnapshot(BlockInfo block) { /** * Update Header with new Block Layout and Redundancy bits. * @param newBlockLayoutPolicy new block layout policy. + * @param newStoragePolicy new storage policy ID. */ - void updateHeaderWithNewBlockLayoutPolicy(byte newBlockLayoutPolicy) { + void updateHeaderWithNewPolicy(byte newBlockLayoutPolicy, + byte newStoragePolicy) { this.header = HeaderFormat.toLong( HeaderFormat.getPreferredBlockSize(header), newBlockLayoutPolicy, - HeaderFormat.getStoragePolicyID(header)); + newStoragePolicy); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index 0b140890ef52a..effacecba6e1e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -2638,14 +2638,14 @@ public Long getNextSPSPath() throws IOException { return namesystem.getBlockManager().getSPSManager().getNextPathId(); } - public boolean swapBlockList(String src, String dst) + public boolean swapBlockList(String src, String dst, long maxTimestamp) throws IOException { checkNNStartup(); if (stateChangeLog.isDebugEnabled()) { stateChangeLog.debug("*DIR* NameNode.swapBlockList: {} and {}", src, dst); } namesystem.checkOperation(OperationCategory.WRITE); - return namesystem.swapBlockList(src, dst); + return namesystem.swapBlockList(src, dst, maxTimestamp); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SwapBlockListOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SwapBlockListOp.java index f99acfd140a17..4ff21c46cef2b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SwapBlockListOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SwapBlockListOp.java @@ -42,7 +42,8 @@ private SwapBlockListOp() { } static SwapBlockListResult swapBlocks(FSDirectory fsd, FSPermissionChecker pc, - String src, String dst) throws IOException { + String src, String dst, long genTimestamp) + throws IOException { final INodesInPath srcIIP = fsd.resolvePath(pc, src, DirOp.WRITE); final INodesInPath dstIIP = fsd.resolvePath(pc, dst, DirOp.WRITE); @@ -57,7 +58,7 @@ static SwapBlockListResult 
swapBlocks(FSDirectory fsd, FSPermissionChecker pc, SwapBlockListResult result = null; fsd.writeLock(); try { - result = swapBlockList(fsd, srcIIP, dstIIP); + result = swapBlockList(fsd, srcIIP, dstIIP, genTimestamp); } finally { fsd.writeUnlock(); } @@ -66,7 +67,8 @@ static SwapBlockListResult swapBlocks(FSDirectory fsd, FSPermissionChecker pc, private static SwapBlockListResult swapBlockList(FSDirectory fsd, final INodesInPath srcIIP, - final INodesInPath dstIIP) + final INodesInPath dstIIP, + long genTimestamp) throws IOException { assert fsd.hasWriteLock(); @@ -84,6 +86,16 @@ private static SwapBlockListResult swapBlockList(FSDirectory fsd, INodeFile srcINodeFile = (INodeFile) srcIIP.getLastINode(); INodeFile dstINodeFile = (INodeFile) dstIIP.getLastINode(); + String errorPrefix = "DIR* FSDirectory.swapBlockList: "; + String error = "Swap Block List destination file "; + BlockInfo lastBlock = dstINodeFile.getLastBlock(); + if (lastBlock != null && lastBlock.getGenerationStamp() != genTimestamp) { + error += dstIIP.getPath() + + " has last block with different gen timestamp."; + NameNode.stateChangeLog.warn(errorPrefix + error); + throw new IOException(error); + } + long mtime = Time.now(); BlockInfo[] dstINodeFileBlocks = dstINodeFile.getBlocks(); dstINodeFile.replaceBlocks(srcINodeFile.getBlocks()); @@ -97,10 +109,15 @@ private static SwapBlockListResult swapBlockList(FSDirectory fsd, byte srcBlockLayoutPolicy = HeaderFormat.getBlockLayoutPolicy(srcHeader); - dstINodeFile.updateHeaderWithNewBlockLayoutPolicy(srcBlockLayoutPolicy); + byte dstStoragePolicyID = HeaderFormat.getStoragePolicyID(dstHeader); + byte srcStoragePolicyID = HeaderFormat.getStoragePolicyID(srcHeader); + + dstINodeFile.updateHeaderWithNewPolicy(srcBlockLayoutPolicy, + srcStoragePolicyID); dstINodeFile.setModificationTime(mtime); - srcINodeFile.updateHeaderWithNewBlockLayoutPolicy(dstBlockLayoutPolicy); + srcINodeFile.updateHeaderWithNewPolicy(dstBlockLayoutPolicy, + dstStoragePolicyID); srcINodeFile.setModificationTime(mtime); return new SwapBlockListResult(true, @@ -143,7 +160,6 @@ private static void validateInode(INodesInPath srcIIP) NameNode.stateChangeLog.warn(errorPrefix + error); throw new IOException(error); } - } static class SwapBlockListResult { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSwapBlockList.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSwapBlockList.java index 84881c3562372..8ce515ee374ff 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSwapBlockList.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSwapBlockList.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; +import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; import org.apache.hadoop.hdfs.server.namenode.INodeFile.HeaderFormat; import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper; import org.junit.After; @@ -100,7 +101,7 @@ public void testInputValidation() throws Exception { // Source file not found. 
try { fsn.swapBlockList("/TestSwapBlockList/dir1/fileXYZ", - "/TestSwapBlockList/dir1/dir11/file3"); + "/TestSwapBlockList/dir1/dir11/file3", 0L); Assert.fail(); } catch (IOException ioEx) { Assert.assertTrue(ioEx instanceof FileNotFoundException); @@ -111,7 +112,7 @@ public void testInputValidation() throws Exception { // Destination file not found. try { fsn.swapBlockList("/TestSwapBlockList/dir1/file1", - "/TestSwapBlockList/dir1/dir11/fileXYZ"); + "/TestSwapBlockList/dir1/dir11/fileXYZ", 0L); Assert.fail(); } catch (IOException ioEx) { Assert.assertTrue(ioEx instanceof FileNotFoundException); @@ -122,7 +123,7 @@ public void testInputValidation() throws Exception { // Source is Directory, not a file. try { fsn.swapBlockList("/TestSwapBlockList/dir1", - "/TestSwapBlockList/dir1/dir11/file3"); + "/TestSwapBlockList/dir1/dir11/file3", 0L); Assert.fail(); } catch (IOException ioEx) { Assert.assertTrue( @@ -138,7 +139,7 @@ public void testInputValidation() throws Exception { dstFile, FSDirectory.DirOp.WRITE).getLastINode(); dstInodeFile.toUnderConstruction("TestClient", "TestClientMachine"); try { - fsn.swapBlockList(sourceFile, dstFile); + fsn.swapBlockList(sourceFile, dstFile, 0L); Assert.fail(); } catch (IOException ioEx) { Assert.assertTrue( @@ -149,12 +150,27 @@ public void testInputValidation() throws Exception { SnapshotTestHelper.createSnapshot(hdfs, subDir2, "s0"); dstFile = "/TestSwapBlockList/dir2/file4"; try { - fsn.swapBlockList(sourceFile, dstFile); + fsn.swapBlockList(sourceFile, dstFile, 0L); Assert.fail(); } catch (IOException ioEx) { Assert.assertTrue( ioEx.getMessage().contains(dstFile + " is in a snapshot directory.")); } + + // Check if gen timestamp validation works. + dstFile = "/TestSwapBlockList/dir1/file2"; + dstInodeFile = (INodeFile) fsdir.resolvePath(fsdir.getPermissionChecker(), + dstFile, FSDirectory.DirOp.WRITE).getLastINode(); + long genStamp = dstInodeFile.getLastBlock().getGenerationStamp(); + dstInodeFile.getLastBlock().setGenerationStamp(genStamp + 1); + try { + fsn.swapBlockList(sourceFile, dstFile, genStamp); + Assert.fail(); + } catch (IOException ioEx) { + Assert.assertTrue( + ioEx.getMessage().contains(dstFile + + " has last block with different gen timestamp.")); + } } @Test @@ -175,7 +191,8 @@ public void testSwapBlockListOp() throws Exception { BlockInfo[] dstBlockLocationsBeforeSwap = dstInodeFile.getBlocks(); long dstHeader = dstInodeFile.getHeaderLong(); - fsn.swapBlockList(sourceFile, dstFile); + fsn.swapBlockList(sourceFile, dstFile, + dstInodeFile.getLastBlock().getGenerationStamp()); assertBlockListEquality(dstBlockLocationsBeforeSwap, srcInodeFile.getBlocks(), srcInodeFile.getId()); assertBlockListEquality(srcBlockLocationsBeforeSwap, @@ -186,6 +203,12 @@ public void testSwapBlockListOp() throws Exception { HeaderFormat.getBlockLayoutPolicy(dstInodeFile.getHeaderLong())); assertEquals(HeaderFormat.getBlockLayoutPolicy(dstHeader), HeaderFormat.getBlockLayoutPolicy(srcInodeFile.getHeaderLong())); + + // Assert Storage policy + assertEquals(HeaderFormat.getStoragePolicyID(srcHeader), + HeaderFormat.getStoragePolicyID(dstInodeFile.getHeaderLong())); + assertEquals(HeaderFormat.getStoragePolicyID(dstHeader), + HeaderFormat.getStoragePolicyID(srcInodeFile.getHeaderLong())); } @Test From db5214cd3a63d074041d01957893c9d662039a64 Mon Sep 17 00:00:00 2001 From: Aravindan Vijayan Date: Sat, 4 Jan 2020 20:31:52 -0800 Subject: [PATCH 5/5] HDFS-14989. Remove unused import. 
--- .../apache/hadoop/hdfs/server/namenode/TestSwapBlockList.java | 1 - 1 file changed, 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSwapBlockList.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSwapBlockList.java index 8ce515ee374ff..c32d003fc4593 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSwapBlockList.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSwapBlockList.java @@ -30,7 +30,6 @@ import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; -import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; import org.apache.hadoop.hdfs.server.namenode.INodeFile.HeaderFormat; import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper; import org.junit.After;
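
For reference, a minimal usage sketch of the API shape this series ends up with
after patch 5/5: FSNamesystem#swapBlockList(src, dst, genTimestamp). It assumes
the MiniDFSCluster fixture from TestSwapBlockList (fsn, fsdir and the
/TestSwapBlockList tree); the test name and the final assertion are
illustrative additions, not part of the patches.

  // Illustrative sketch only, condensed from TestSwapBlockList#testSwapBlockListOp.
  @Test
  public void swapBlockListUsageSketch() throws Exception {
    String src = "/TestSwapBlockList/dir1/file1";
    String dst = "/TestSwapBlockList/dir1/dir11/file3";

    INodeFile dstInode =
        (INodeFile) fsdir.resolvePath(fsdir.getPermissionChecker(),
            dst, FSDirectory.DirOp.WRITE).getLastINode();

    // Patch 4 adds a fencing check: the caller supplies the generation stamp
    // of the destination's last block, and the swap fails with an IOException
    // if that stamp no longer matches.
    long expectedGenStamp = dstInode.getLastBlock().getGenerationStamp();

    // Under the namesystem write lock this swaps the two block lists, points
    // the blocks at their new block collection IDs, and exchanges the
    // block-layout and storage-policy bits of the two file headers.
    boolean success = fsn.swapBlockList(src, dst, expectedGenStamp);
    Assert.assertTrue(success);
  }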