Skip to content

Commit 743c2e9

Browse files
HDFS-15316. Deletion failure should not remove directory from snapshottables. Contributed by hemanthboyina
1 parent 450e5aa commit 743c2e9

File tree

2 files changed

+48
-2
lines changed

2 files changed

+48
-2
lines changed

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -61,10 +61,10 @@ static long delete(FSDirectory fsd, INodesInPath iip,
6161
removedUCFiles);
6262
if (unprotectedDelete(fsd, iip, context, mtime)) {
6363
filesRemoved = context.quotaDelta().getNsDelta();
64+
fsn.removeSnapshottableDirs(snapshottableDirs);
6465
}
6566
fsd.updateReplicationFactor(context.collectedBlocks()
6667
.toUpdateReplicationInfo());
67-
fsn.removeSnapshottableDirs(snapshottableDirs);
6868
fsd.updateCount(iip, context.quotaDelta(), false);
6969
}
7070
} finally {
@@ -144,9 +144,9 @@ static void deleteForEditLog(FSDirectory fsd, INodesInPath iip, long mtime)
144144
new ReclaimContext(fsd.getBlockStoragePolicySuite(),
145145
collectedBlocks, removedINodes, removedUCFiles),
146146
mtime);
147-
fsn.removeSnapshottableDirs(snapshottableDirs);
148147

149148
if (filesRemoved) {
149+
fsn.removeSnapshottableDirs(snapshottableDirs);
150150
fsn.removeLeasesAndINodes(removedUCFiles, removedINodes, false);
151151
fsn.getBlockManager().removeBlocksAndUpdateSafemodeTotal(collectedBlocks);
152152
}

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java

Lines changed: 46 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -69,6 +69,8 @@
6969

7070
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LEASE_RECHECK_INTERVAL_MS_DEFAULT;
7171
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LEASE_RECHECK_INTERVAL_MS_KEY;
72+
import static org.junit.Assert.assertEquals;
73+
import static org.mockito.ArgumentMatchers.any;
7274
import static org.junit.Assert.assertNotEquals;
7375

7476
/**
@@ -513,4 +515,48 @@ public void testOpenRenameRace() throws Exception {
513515
}
514516
}
515517
}
518+
519+
/**
520+
* Test getting a snapshot diff on a directory whose deletion failed.
521+
*/
522+
@Test
523+
public void testDeleteOnSnapshottableDir() throws Exception {
524+
conf.setBoolean(
525+
DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_DIFF_ALLOW_SNAP_ROOT_DESCENDANT,
526+
true);
527+
try {
528+
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
529+
cluster.waitActive();
530+
DistributedFileSystem hdfs = cluster.getFileSystem();
531+
FSNamesystem fsn = cluster.getNamesystem();
532+
FSDirectory fsdir = fsn.getFSDirectory();
533+
Path dir = new Path("/dir");
534+
hdfs.mkdirs(dir);
535+
hdfs.allowSnapshot(dir);
536+
Path file1 = new Path(dir, "file1");
537+
Path file2 = new Path(dir, "file2");
538+
539+
// directory should not get deleted
540+
FSDirectory fsdir2 = Mockito.spy(fsdir);
541+
cluster.getNamesystem().setFSDirectory(fsdir2);
542+
Mockito.doReturn(-1L).when(fsdir2).removeLastINode(any());
543+
hdfs.delete(dir, true);
544+
545+
// create files and create snapshots
546+
DFSTestUtil.createFile(hdfs, file1, BLOCK_SIZE, (short) 1, 0);
547+
hdfs.createSnapshot(dir, "s1");
548+
DFSTestUtil.createFile(hdfs, file2, BLOCK_SIZE, (short) 1, 0);
549+
hdfs.createSnapshot(dir, "s2");
550+
551+
// should be able to get snapshot diff on ancestor dir
552+
Path dirDir1 = new Path(dir, "dir1");
553+
hdfs.mkdirs(dirDir1);
554+
hdfs.getSnapshotDiffReport(dirDir1, "s2", "s1");
555+
assertEquals(1, fsn.getSnapshotManager().getNumSnapshottableDirs());
556+
} finally {
557+
if (cluster != null) {
558+
cluster.shutdown();
559+
}
560+
}
561+
}
516562
}

0 commit comments

Comments (0)