diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
index 9440b4e050a72..1e92666fa7a4e 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
@@ -973,6 +973,9 @@ There is no consistent return code from an attempt to delete the root
 directory. Implementations SHOULD return true; this avoids code which checks
 for a false return value from overreacting.
 
+Note: Some object store based filesystem implementations always return
+false when deleting the root.
+
 ##### Empty (non-root) directory `recursive == False`
 
 Deleting an empty directory that is not root will remove the path from the FS and
@@ -1019,6 +1022,9 @@ Any filesystem client which interacts with a remote filesystem which lacks
 such a security model, MAY reject calls to `delete("/", true)` on the basis
 that it makes it too easy to lose data.
 
+Note: Some object store based filesystem implementations always return
+false when deleting the root.
+
 ##### Recursive delete of non-root directory
 
 Deleting a non-root path with children `recursive==true`
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index f9148ef47bbfb..340be51c5f447 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -2195,6 +2195,10 @@ void removeKeys(
   public boolean delete(Path f, boolean recursive) throws IOException {
     try {
       entryPoint(INVOCATION_DELETE);
+      if (f.isRoot()) {
+        return rejectRootDirectoryDelete(f, recursive);
+      }
+
       boolean outcome = innerDelete(innerGetFileStatus(f, true), recursive);
       if (outcome) {
         try {
@@ -2250,10 +2254,6 @@ private boolean innerDelete(S3AFileStatus status, boolean recursive)
         key = key + "/";
       }
 
-      if (key.equals("/")) {
-        return rejectRootDirectoryDelete(status, recursive);
-      }
-
       if (!recursive && status.isEmptyDirectory() == Tristate.FALSE) {
         throw new PathIsNotEmptyDirectoryException(f.toString());
       }
@@ -2308,29 +2308,16 @@ private boolean innerDelete(S3AFileStatus status, boolean recursive)
    * The caller must return the result of this call, rather than
    * attempt to continue with the delete operation: deleting root
    * directories is never allowed. This method simply implements
-   * the policy of when to return an exit code versus raise an exception.
-   * @param status filesystem status
+   * the policy of when to return false versus raise an exception.
+   * @param path path for deletion
    * @param recursive recursive flag from command
    * @return a return code for the operation
    * @throws PathIOException if the operation was explicitly rejected.
    */
-  private boolean rejectRootDirectoryDelete(S3AFileStatus status,
-      boolean recursive) throws IOException {
-    LOG.info("s3a delete the {} root directory. Path: {}. Recursive: {}",
-        bucket, status.getPath(), recursive);
-    boolean emptyRoot = status.isEmptyDirectory() == Tristate.TRUE;
-    if (emptyRoot) {
-      return true;
-    }
-    if (recursive) {
-      LOG.error("Cannot delete root path: {}", status.getPath());
-      return false;
-    } else {
-      // reject
-      String msg = "Cannot delete root path: " + status.getPath();
-      LOG.error(msg);
-      throw new PathIOException(bucket, msg);
-    }
+  private boolean rejectRootDirectoryDelete(Path path, boolean recursive) {
+    LOG.error("S3A: Cannot delete the {} root directory. Path: {}. Recursive: "
+        + "{}", bucket, path, recursive);
+    return false;
   }
 
   /**
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractRootDir.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractRootDir.java
index 9ef3763bfc39a..3c41f07ba1565 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractRootDir.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractRootDir.java
@@ -26,6 +26,7 @@
 import org.apache.hadoop.fs.contract.AbstractFSContract;
 import org.apache.hadoop.fs.s3a.S3AFileSystem;
 
+import org.junit.Ignore;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -62,6 +63,11 @@ public S3AFileSystem getFileSystem() {
     return (S3AFileSystem) super.getFileSystem();
   }
 
+  @Override
+  @Ignore("S3A always returns false when deleting the root directory non-recursively")
+  public void testRmNonEmptyRootDirNonRecursive() throws Throwable {
+  }
+
   /**
    * This is overridden to allow for eventual consistency on listings,
    * but only if the store does not have S3Guard protecting it.
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardDDBRootOperations.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardDDBRootOperations.java
index 74c406978e3e9..e2c95e194bbf6 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardDDBRootOperations.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardDDBRootOperations.java
@@ -234,7 +234,7 @@ public void test_400_rm_root_recursive() throws Throwable {
 
     assertDeleted(file, false);
 
-    assertTrue("Root directory delete failed",
+    assertFalse("Root directory delete should have returned false",
         fs.delete(root, true));
 
     ContractTestUtils.touch(fs, file2);
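For illustration, a minimal sketch of how a client observes the behavior this patch introduces: with the root check moved into `delete()`, deleting the root of an S3A bucket now logs an error and returns `false` for both recursive and non-recursive calls, instead of sometimes returning `true` (empty root) or throwing `PathIOException` (non-recursive, non-empty root) as the removed code did. The class name `RootDeleteExample` and the bucket `s3a://example-bucket/` are hypothetical placeholders, not part of the patch.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    /** Hypothetical sketch of the post-patch root-delete contract. */
    public class RootDeleteExample {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Placeholder bucket; running this requires S3A credentials to be configured.
        Path root = new Path("s3a://example-bucket/");
        FileSystem fs = FileSystem.get(root.toUri(), conf);

        // With this patch, S3AFileSystem.delete() short-circuits on f.isRoot()
        // and rejectRootDirectoryDelete() logs an error and returns false.
        boolean recursiveOutcome = fs.delete(root, true);     // false
        boolean nonRecursiveOutcome = fs.delete(root, false); // false

        System.out.println("delete(/, recursive=true)  -> " + recursiveOutcome);
        System.out.println("delete(/, recursive=false) -> " + nonRecursiveOutcome);
      }
    }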