@@ -33,6 +33,7 @@
import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotManager;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
import org.apache.hadoop.util.ChunkedArrayList;
import org.apache.hadoop.util.Time;
@@ -193,7 +194,7 @@ static INodesInPath unprotectedRenameTo(FSDirectory fsd,
}

    validateNestSnapshot(fsd, src, dstParent.asDirectory(), snapshottableDirs);
-
+    checkUnderSameSnapshottableRoot(fsd, srcIIP, dstIIP);
fsd.ezManager.checkMoveValidity(srcIIP, dstIIP);
// Ensure dst has quota to accommodate rename
verifyFsLimitsForRename(fsd, srcIIP, dstIIP);
@@ -407,6 +408,7 @@ static RenameResult unprotectedRenameTo(FSDirectory fsd,

validateNestSnapshot(fsd, src,
dstParent.asDirectory(), srcSnapshottableDirs);
+    checkUnderSameSnapshottableRoot(fsd, srcIIP, dstIIP);

// Ensure dst has quota to accommodate rename
verifyFsLimitsForRename(fsd, srcIIP, dstIIP);
@@ -821,6 +823,29 @@ void updateQuotasInSourceTree(BlockStoragePolicySuite bsps) {
}
}

+  private static void checkUnderSameSnapshottableRoot(
+      FSDirectory fsd, INodesInPath srcIIP, INodesInPath dstIIP)
+      throws IOException {
+    // Ensure rename out of a snapshottable root is not permitted if the
+    // ordered snapshot deletion feature is enabled
+    SnapshotManager snapshotManager = fsd.getFSNamesystem().
+        getSnapshotManager();
+    if (snapshotManager.isSnapshotDeletionOrdered() && fsd.getFSNamesystem()
+        .isSnapshotTrashRootEnabled()) {
+      INodeDirectory srcRoot = snapshotManager.
+          getSnapshottableAncestorDir(srcIIP);
+      INodeDirectory dstRoot = snapshotManager.
+          getSnapshottableAncestorDir(dstIIP);
+      // Ensure the snapshottable root for both src and dst is the same.
+      if (srcRoot != dstRoot) {
+        String errMsg = "Source " + srcIIP.getPath() +
+            " and dest " + dstIIP.getPath() + " are not under " +
+            "the same snapshot root.";
+        throw new SnapshotException(errMsg);
+      }
+    }
+  }

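Note: a minimal client-side sketch (not part of this patch) of the rule the new check enforces. It assumes a running HDFS whose NameNode has dfs.namenode.snapshot.deletion.ordered and dfs.namenode.snapshot.trashroot.enabled both set to true, plus two hypothetical directories: /snaproot (snapshottable) and /plain (not snapshottable), each containing a child "a".

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RenameAcrossSnapshotRoots {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // Both sides under the same snapshottable root: allowed.
    fs.rename(new Path("/snaproot/a"), new Path("/snaproot/b"));
    // Neither side under any snapshottable root: allowed.
    fs.rename(new Path("/plain/a"), new Path("/plain/b"));
    // Crossing the snapshot-root boundary in either direction is rejected
    // with a SnapshotException: "... are not under the same snapshot root."
    try {
      fs.rename(new Path("/snaproot/b"), new Path("/plain/c"));
    } catch (java.io.IOException expected) {
      System.out.println("rejected: " + expected.getMessage());
    }
  }
}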
private static RenameResult createRenameResult(FSDirectory fsd,
INodesInPath dst, boolean filesDeleted,
BlocksMapUpdateInfo collectedBlocks) throws IOException {
@@ -384,9 +384,10 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
.getLogger(FSNamesystem.class.getName());

// The following are private configurations
-  static final String DFS_NAMENODE_SNAPSHOT_TRASHROOT_ENABLED =
+  public static final String DFS_NAMENODE_SNAPSHOT_TRASHROOT_ENABLED =
       "dfs.namenode.snapshot.trashroot.enabled";
-  static final boolean DFS_NAMENODE_SNAPSHOT_TRASHROOT_ENABLED_DEFAULT = false;
+  public static final boolean DFS_NAMENODE_SNAPSHOT_TRASHROOT_ENABLED_DEFAULT
+      = false;
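Note: these keys are widened from package-private to public so that code outside the package, such as the new TestRenameWithOrderedSnapshotDeletion below, can reference them via a static import instead of duplicating the literal strings.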

private final MetricsRegistry registry = new MetricsRegistry("FSNamesystem");
@Metric final MutableRatesWithAggregation detailedLockHoldTimeMetrics =
@@ -468,6 +469,7 @@ private void logAuditEvent(boolean succeeded,
private final UserGroupInformation fsOwner;
private final String supergroup;
private final boolean standbyShouldCheckpoint;
+  private final boolean isSnapshotTrashRootEnabled;
private final int snapshotDiffReportLimit;
private final int blockDeletionIncrement;

@@ -882,6 +884,9 @@ static FSNamesystem loadFromDisk(Configuration conf) throws IOException {
// Get the checksum type from config
String checksumTypeStr = conf.get(DFS_CHECKSUM_TYPE_KEY,
DFS_CHECKSUM_TYPE_DEFAULT);
+    this.isSnapshotTrashRootEnabled = conf.getBoolean(
+        DFS_NAMENODE_SNAPSHOT_TRASHROOT_ENABLED,
+        DFS_NAMENODE_SNAPSHOT_TRASHROOT_ENABLED_DEFAULT);
DataChecksum.Type checksumType;
try {
checksumType = DataChecksum.Type.valueOf(checksumTypeStr);
@@ -909,8 +914,7 @@ static FSNamesystem loadFromDisk(Configuration conf) throws IOException {
CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
""),
blockManager.getStoragePolicySuite().getDefaultPolicy().getId(),
-        conf.getBoolean(DFS_NAMENODE_SNAPSHOT_TRASHROOT_ENABLED,
-            DFS_NAMENODE_SNAPSHOT_TRASHROOT_ENABLED_DEFAULT));
+        isSnapshotTrashRootEnabled);

this.maxFsObjects = conf.getLong(DFS_NAMENODE_MAX_OBJECTS_KEY,
DFS_NAMENODE_MAX_OBJECTS_DEFAULT);
@@ -1054,6 +1058,10 @@ public int getMaxListOpenFilesResponses() {
return maxListOpenFilesResponses;
}

+  boolean isSnapshotTrashRootEnabled() {
+    return isSnapshotTrashRootEnabled;
+  }

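Note: the flag is read from the Configuration once, when the FSNamesystem is constructed, and cached in the final field above; the rename-time check then reads a boolean instead of re-parsing the Configuration on every operation.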
void lockRetryCache() {
if (retryCache != null) {
retryCache.lock();
@@ -363,21 +363,35 @@ void assertFirstSnapshot(INodeDirectory dir,
* @param iip INodesInPath for the directory to get snapshot root.
* @return the snapshot root INodeDirectory
*/
+  public INodeDirectory checkAndGetSnapshottableAncestorDir(
+      final INodesInPath iip) throws IOException {
+    final INodeDirectory dir = getSnapshottableAncestorDir(iip);
+    if (dir == null) {
+      throw new SnapshotException("Directory is neither snapshottable nor" +
+          " under a snap root!");
+    }
+    return dir;
+  }
+
public INodeDirectory getSnapshottableAncestorDir(final INodesInPath iip)
throws IOException {
final String path = iip.getPath();
-    final INodeDirectory dir = INodeDirectory.valueOf(iip.getLastINode(), path);
+    final INode inode = iip.getLastINode();
+    final INodeDirectory dir;
+    if (inode instanceof INodeDirectory) {
+      dir = INodeDirectory.valueOf(inode, path);
+    } else {
+      dir = INodeDirectory.valueOf(iip.getINode(-2), iip.getParentPath());
+    }
     if (dir.isSnapshottable()) {
       return dir;
-    } else {
-      for (INodeDirectory snapRoot : this.snapshottables.values()) {
-        if (dir.isAncestorDirectory(snapRoot)) {
-          return snapRoot;
-        }
-      }
-      throw new SnapshotException("Directory is neither snapshottable nor" +
-          " under a snap root!");
     }
+    for (INodeDirectory snapRoot : this.snapshottables.values()) {
+      if (dir.isAncestorDirectory(snapRoot)) {
+        return snapRoot;
+      }
+    }
+    return null;
   }

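Note: the refactor separates lookup from validation. getSnapshottableAncestorDir now returns null for a path that is not under any snapshottable directory, which lets checkUnderSameSnapshottableRoot treat "both roots null" as the legal same-root case, while checkAndGetSnapshottableAncestorDir preserves the old throwing contract for callers, such as the snapshot diff paths below, that require a snapshot root.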
public boolean isDescendantOfSnapshotRoot(INodeDirectory dir) {
@@ -641,7 +655,7 @@ public SnapshotDiffReport diff(final INodesInPath iip,
// All the check for path has been included in the valueOf method.
INodeDirectory snapshotRootDir;
if (this.snapshotDiffAllowSnapRootDescendant) {
-      snapshotRootDir = getSnapshottableAncestorDir(iip);
+      snapshotRootDir = checkAndGetSnapshottableAncestorDir(iip);
} else {
snapshotRootDir = getSnapshottableRoot(iip);
}
@@ -674,7 +688,7 @@ public SnapshotDiffReportListing diff(final INodesInPath iip,
// All the check for path has been included in the valueOf method.
INodeDirectory snapshotRootDir;
if (this.snapshotDiffAllowSnapRootDescendant) {
-      snapshotRootDir = getSnapshottableAncestorDir(iip);
+      snapshotRootDir = checkAndGetSnapshottableAncestorDir(iip);
} else {
snapshotRootDir = getSnapshottableRoot(iip);
}
@@ -0,0 +1,110 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;

import java.io.IOException;

import static org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotManager.DFS_NAMENODE_SNAPSHOT_DELETION_ORDERED;
import static org.apache.hadoop.hdfs.server.namenode.FSNamesystem.DFS_NAMENODE_SNAPSHOT_TRASHROOT_ENABLED;

/**
* Test Rename with ordered snapshot deletion.
*/
public class TestRenameWithOrderedSnapshotDeletion {
private final Path snapshottableDir
= new Path("/" + getClass().getSimpleName());
private DistributedFileSystem hdfs;
private MiniDFSCluster cluster;

@Before
public void setUp() throws Exception {
final Configuration conf = new Configuration();
conf.setBoolean(DFS_NAMENODE_SNAPSHOT_DELETION_ORDERED, true);
conf.setBoolean(DFS_NAMENODE_SNAPSHOT_TRASHROOT_ENABLED, true);

cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
hdfs = cluster.getFileSystem();
}

@After
public void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
cluster = null;
}
}

@Test(timeout = 60000)
public void testRename() throws Exception {
    final Path dir1 = new Path("/dir1");
    final Path dir2 = new Path("/dir2");
    final Path sub0 = new Path(snapshottableDir, "sub0");
    final Path sub1 = new Path(snapshottableDir, "sub1");
    final Path file1 = new Path(dir1, "file1");
    final Path file2 = new Path(sub0, "file2");
    hdfs.mkdirs(snapshottableDir);
    hdfs.mkdirs(dir1);
    hdfs.mkdirs(dir2);
    hdfs.mkdirs(sub0);
    DFSTestUtil.createFile(hdfs, file1, 0, (short) 1, 0);
    DFSTestUtil.createFile(hdfs, file2, 0, (short) 1, 0);
    hdfs.allowSnapshot(snapshottableDir);
    // rename from a non-snapshottable dir to a snapshottable dir should fail
validateRename(file1, sub0);
hdfs.createSnapshot(snapshottableDir, "s0");
validateRename(file1, sub0);
    // rename across non-snapshottable dirs should work
hdfs.rename(file1, dir2);
    // rename out of the snapshottable root should fail
validateRename(file2, dir1);
// rename within snapshottable root should work
hdfs.rename(file2, snapshottableDir);

// rename dirs outside snapshottable root should work
hdfs.rename(dir2, dir1);
// rename dir into snapshottable root should fail
validateRename(dir1, snapshottableDir);
    // rename a dir out of the snapshottable root should fail
validateRename(sub0, dir2);
// rename dir within snapshottable root should work
hdfs.rename(sub0, sub1);
}

private void validateRename(Path src, Path dest) {
try {
hdfs.rename(src, dest);
Assert.fail("Expected exception not thrown.");
} catch (IOException ioe) {
Assert.assertTrue(ioe.getMessage().contains("are not under the" +
" same snapshot root."));
}
}
}