From 43642d068c9cea191cd2b07c9734b9c63fa0af7e Mon Sep 17 00:00:00 2001 From: sumangala Date: Sun, 27 Jun 2021 17:18:19 +0530 Subject: [PATCH 01/11] part 1: test sub-packages --- .../fs/azurebfs/AbstractAbfsIntegrationTest.java | 13 +++++++++++++ .../fs/azurebfs/services/ITestAbfsOutputStream.java | 7 +++---- .../fs/azurebfs/services/TestAbfsInputStream.java | 5 ++--- 3 files changed, 18 insertions(+), 7 deletions(-) diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java index 3befee48493fc..91f29fc0c1fc8 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java @@ -25,6 +25,8 @@ import java.util.UUID; import java.util.concurrent.Callable; +import org.apache.commons.lang3.StringUtils; + import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -81,6 +83,7 @@ public abstract class AbstractAbfsIntegrationTest extends private AuthType authType; private boolean useConfiguredFileSystem = false; private boolean usingFilesystemForSASTests = false; + private static final int SHORTENED_GUID_LEN = 12; protected AbstractAbfsIntegrationTest() throws Exception { fileSystemName = TEST_CONTAINER_PREFIX + UUID.randomUUID().toString(); @@ -414,6 +417,16 @@ protected Path path(String filepath) throws IOException { new Path(getTestPath(), filepath)); } + /** + * Generate a unique path using the given filepath + * @param filepath path string + * @return unique path created from filepath and a GUID + */ + protected Path getUniquePath(String filepath) { + return new Path(filepath + StringUtils + .right(UUID.randomUUID().toString(), SHORTENED_GUID_LEN)); + } + /** * Get any Delegation Token manager created by the filesystem. 
* @return the DT manager or null. diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsOutputStream.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsOutputStream.java index fff005114fbe0..bdd2178bc1664 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsOutputStream.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsOutputStream.java @@ -23,7 +23,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.azurebfs.AbstractAbfsIntegrationTest; import org.apache.hadoop.fs.azurebfs.AzureBlobFileSystem; import org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys; @@ -32,7 +31,7 @@ * Test create operation. */ public class ITestAbfsOutputStream extends AbstractAbfsIntegrationTest { - private static final Path TEST_FILE_PATH = new Path("testfile"); + private static final String TEST_FILE_PATH = "testfile"; public ITestAbfsOutputStream() throws Exception { super(); @@ -42,7 +41,7 @@ public ITestAbfsOutputStream() throws Exception { public void testMaxRequestsAndQueueCapacityDefaults() throws Exception { Configuration conf = getRawConfiguration(); final AzureBlobFileSystem fs = getFileSystem(conf); - try (FSDataOutputStream out = fs.create(TEST_FILE_PATH)) { + try (FSDataOutputStream out = fs.create(getUniquePath(TEST_FILE_PATH))) { AbfsOutputStream stream = (AbfsOutputStream) out.getWrappedStream(); int maxConcurrentRequests @@ -71,7 +70,7 @@ public void testMaxRequestsAndQueueCapacity() throws Exception { conf.set(ConfigurationKeys.AZURE_WRITE_MAX_REQUESTS_TO_QUEUE, "" + maxRequestsToQueue); final AzureBlobFileSystem fs = getFileSystem(conf); - FSDataOutputStream out = fs.create(TEST_FILE_PATH); + FSDataOutputStream out = fs.create(getUniquePath(TEST_FILE_PATH)); AbfsOutputStream 
stream = (AbfsOutputStream) out.getWrappedStream(); if (stream.isAppendBlobStream()) { diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsInputStream.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsInputStream.java index 3da004bafa4df..e2d31eb8e7b3a 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsInputStream.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsInputStream.java @@ -576,7 +576,7 @@ public void testDefaultReadaheadQueueDepth() throws Exception { Configuration config = getRawConfiguration(); config.unset(FS_AZURE_READ_AHEAD_QUEUE_DEPTH); AzureBlobFileSystem fs = getFileSystem(config); - Path testFile = new Path("/testFile"); + Path testFile = getUniquePath("/testFile"); fs.create(testFile); FSDataInputStream in = fs.open(testFile); Assertions.assertThat( @@ -639,8 +639,7 @@ public AbfsInputStream testReadAheadConfigs(int readRequestSize, readAheadBlockSize = readRequestSize; } - Path testPath = new Path( - "/testReadAheadConfigs"); + Path testPath = getUniquePath("/testReadAheadConfigs"); final AzureBlobFileSystem fs = createTestFile(testPath, ALWAYS_READ_BUFFER_SIZE_TEST_FILE_SIZE, config); byte[] byteBuffer = new byte[ONE_MB]; From e85de23e2cf27ec1f923f6e17696f886ff94fb95 Mon Sep 17 00:00:00 2001 From: sumangala Date: Sun, 27 Jun 2021 18:08:05 +0530 Subject: [PATCH 02/11] part 2a: upto create test --- .../hadoop/fs/azurebfs/ITestAbfsClient.java | 11 ++++---- .../ITestAbfsListStatusRemoteIterator.java | 6 ++--- .../azurebfs/ITestAbfsReadWriteAndSeek.java | 7 ++--- .../azurebfs/ITestAbfsStreamStatistics.java | 4 +-- .../ITestAzureBlobFileSystemAppend.java | 12 ++++----- .../ITestAzureBlobFileSystemBackCompat.java | 9 ++++--- .../ITestAzureBlobFileSystemCheckAccess.java | 3 ++- .../ITestAzureBlobFileSystemCopy.java | 2 +- .../ITestAzureBlobFileSystemCreate.java | 
26 ++++++++++++------- 9 files changed, 46 insertions(+), 34 deletions(-) diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsClient.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsClient.java index a4d645899049f..79992d874e245 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsClient.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsClient.java @@ -91,7 +91,7 @@ public void testUnknownHost() throws Exception { public void testListPathWithValidListMaxResultsValues() throws IOException, ExecutionException, InterruptedException { final int fileCount = 10; - final String directory = "testWithValidListMaxResultsValues"; + final Path directory = getUniquePath("testWithValidListMaxResultsValues"); createDirectoryWithNFiles(directory, fileCount); final int[] testData = {fileCount + 100, fileCount + 1, fileCount, fileCount - 1, 1}; @@ -100,7 +100,7 @@ public void testListPathWithValidListMaxResultsValues() setListMaxResults(listMaxResults); int expectedListResultsSize = listMaxResults > fileCount ? 
fileCount : listMaxResults; - Assertions.assertThat(listPath(directory)).describedAs( + Assertions.assertThat(listPath(directory.toString())).describedAs( "AbfsClient.listPath result should contain %d items when " + "listMaxResults is %d and directory contains %d items", expectedListResultsSize, listMaxResults, fileCount) @@ -112,9 +112,10 @@ public void testListPathWithValidListMaxResultsValues() public void testListPathWithValueGreaterThanServerMaximum() throws IOException, ExecutionException, InterruptedException { setListMaxResults(LIST_MAX_RESULTS_SERVER + 100); - final String directory = "testWithValueGreaterThanServerMaximum"; + final Path directory = getUniquePath( + "testWithValueGreaterThanServerMaximum"); createDirectoryWithNFiles(directory, LIST_MAX_RESULTS_SERVER + 200); - Assertions.assertThat(listPath(directory)).describedAs( + Assertions.assertThat(listPath(directory.toString())).describedAs( "AbfsClient.listPath result will contain a maximum of %d items " + "even if listMaxResults >= %d or directory " + "contains more than %d items", LIST_MAX_RESULTS_SERVER, @@ -149,7 +150,7 @@ private void setListMaxResults(int listMaxResults) throws IOException { .setListMaxResults(listMaxResults); } - private void createDirectoryWithNFiles(String directory, int n) + private void createDirectoryWithNFiles(Path directory, int n) throws ExecutionException, InterruptedException { final List> tasks = new ArrayList<>(); ExecutorService es = Executors.newFixedThreadPool(10); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsListStatusRemoteIterator.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsListStatusRemoteIterator.java index 6d5e4cf3bce2d..1765e9f004821 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsListStatusRemoteIterator.java +++ 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsListStatusRemoteIterator.java @@ -231,8 +231,8 @@ public void testHasNextForEmptyDir() throws Exception { @Test public void testHasNextForFile() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - String testFileName = "testFile"; - Path testFile = new Path(testFileName); + Path testFile = getUniquePath("testFile"); + String testFileName = testFile.toString(); getFileSystem().create(testFile); setPageSize(10); RemoteIterator fsItr = fs.listStatusIterator(testFile); @@ -297,7 +297,7 @@ public String listStatus(Path path, String startFrom, private Path createTestDirectory() throws IOException { String testDirectoryName = "testDirectory" + System.currentTimeMillis(); - Path testDirectory = new Path(testDirectoryName); + Path testDirectory = getUniquePath(testDirectoryName); getFileSystem().mkdirs(testDirectory); return testDirectory; } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsReadWriteAndSeek.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsReadWriteAndSeek.java index 52abb097ef311..e8c09c7ce8da0 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsReadWriteAndSeek.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsReadWriteAndSeek.java @@ -41,7 +41,7 @@ */ @RunWith(Parameterized.class) public class ITestAbfsReadWriteAndSeek extends AbstractAbfsScaleTest { - private static final Path TEST_PATH = new Path("/testfile"); + private static final String TEST_PATH = "/testfile"; @Parameterized.Parameters(name = "Size={0}") public static Iterable sizes() { @@ -73,13 +73,14 @@ private void testReadWriteAndSeek(int bufferSize) throws Exception { final byte[] b = new byte[2 * bufferSize]; new Random().nextBytes(b); - try (FSDataOutputStream stream = fs.create(TEST_PATH)) { + Path testPath = getUniquePath(TEST_PATH); + 
try (FSDataOutputStream stream = fs.create(testPath)) { stream.write(b); } final byte[] readBuffer = new byte[2 * bufferSize]; int result; - try (FSDataInputStream inputStream = fs.open(TEST_PATH)) { + try (FSDataInputStream inputStream = fs.open(testPath)) { inputStream.seek(bufferSize); result = inputStream.read(readBuffer, bufferSize, bufferSize); assertNotEquals(-1, result); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStreamStatistics.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStreamStatistics.java index 7eadb4bb8ff23..9104657730346 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStreamStatistics.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStreamStatistics.java @@ -52,8 +52,8 @@ public void testAbfsStreamOps() throws Exception { + "Abfs"); final AzureBlobFileSystem fs = getFileSystem(); - Path smallOperationsFile = new Path("testOneReadWriteOps"); - Path largeOperationsFile = new Path("testLargeReadWriteOps"); + Path smallOperationsFile = getUniquePath("testOneReadWriteOps"); + Path largeOperationsFile = getUniquePath("testLargeReadWriteOps"); FileSystem.Statistics statistics = fs.getFsStatistics(); String testReadWriteOps = "test this"; statistics.reset(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAppend.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAppend.java index cbe19396d1277..82d0415a1d013 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAppend.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAppend.java @@ -32,8 +32,8 @@ */ public class ITestAzureBlobFileSystemAppend extends AbstractAbfsIntegrationTest { - private static final Path TEST_FILE_PATH 
= new Path("testfile"); - private static final Path TEST_FOLDER_PATH = new Path("testFolder"); + private static final String TEST_FILE_PATH = "testfile"; + private static final String TEST_FOLDER_PATH = "testFolder"; public ITestAzureBlobFileSystemAppend() throws Exception { super(); @@ -42,7 +42,7 @@ public ITestAzureBlobFileSystemAppend() throws Exception { @Test(expected = FileNotFoundException.class) public void testAppendDirShouldFail() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - final Path filePath = TEST_FILE_PATH; + final Path filePath = getUniquePath(TEST_FILE_PATH); fs.mkdirs(filePath); fs.append(filePath, 0); } @@ -50,7 +50,7 @@ public void testAppendDirShouldFail() throws Exception { @Test public void testAppendWithLength0() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - try(FSDataOutputStream stream = fs.create(TEST_FILE_PATH)) { + try(FSDataOutputStream stream = fs.create(getUniquePath(TEST_FILE_PATH))) { final byte[] b = new byte[1024]; new Random().nextBytes(b); stream.write(b, 1000, 0); @@ -62,7 +62,7 @@ public void testAppendWithLength0() throws Exception { @Test(expected = FileNotFoundException.class) public void testAppendFileAfterDelete() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - final Path filePath = TEST_FILE_PATH; + final Path filePath = getUniquePath(TEST_FILE_PATH); ContractTestUtils.touch(fs, filePath); fs.delete(filePath, false); @@ -72,7 +72,7 @@ public void testAppendFileAfterDelete() throws Exception { @Test(expected = FileNotFoundException.class) public void testAppendDirectory() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - final Path folderPath = TEST_FOLDER_PATH; + final Path folderPath = getUniquePath(TEST_FOLDER_PATH); fs.mkdirs(folderPath); fs.append(folderPath); } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemBackCompat.java 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemBackCompat.java index ffa3c8cf7d8fa..979708edced10 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemBackCompat.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemBackCompat.java @@ -50,13 +50,16 @@ public void testBlobBackCompat() throws Exception { CloudBlobContainer container = blobClient.getContainerReference(this.getFileSystemName()); container.createIfNotExists(); - CloudBlockBlob blockBlob = container.getBlockBlobReference("test/10/10/10"); + Path testPath = getUniquePath("test"); + CloudBlockBlob blockBlob = container + .getBlockBlobReference(testPath + "/10/10/10"); blockBlob.uploadText(""); - blockBlob = container.getBlockBlobReference("test/10/123/3/2/1/3"); + blockBlob = container.getBlockBlobReference(testPath + "/10/123/3/2/1/3"); blockBlob.uploadText(""); - FileStatus[] fileStatuses = fs.listStatus(new Path("/test/10/")); + FileStatus[] fileStatuses = fs + .listStatus(new Path(String.format("/%s/10/", testPath))); assertEquals(2, fileStatuses.length); assertEquals("10", fileStatuses[0].getPath().getName()); assertTrue(fileStatuses[0].isDirectory()); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCheckAccess.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCheckAccess.java index ebd64812d45b9..21c62665e75b2 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCheckAccess.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCheckAccess.java @@ -350,7 +350,8 @@ private void modifyAcl(Path file, String uid, FsAction fsAction) private Path setupTestDirectoryAndUserAccess(String testFileName, FsAction fsAction) throws Exception { - Path 
file = new Path(TEST_FOLDER_PATH + testFileName); + Path testPath = getUniquePath(TEST_FOLDER_PATH); + Path file = new Path(testPath + testFileName); file = this.superUserFs.makeQualified(file); this.superUserFs.delete(file, true); this.superUserFs.create(file); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCopy.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCopy.java index 917ee9ce1b07e..e0078fb0ff7b8 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCopy.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCopy.java @@ -53,7 +53,7 @@ public void testCopyFromLocalFileSystem() throws Exception { localFs.delete(localFilePath, true); try { writeString(localFs, localFilePath, "Testing"); - Path dstPath = new Path("copiedFromLocal"); + Path dstPath = getUniquePath("copiedFromLocal"); assertTrue(FileUtil.copy(localFs, localFilePath, fs, dstPath, false, fs.getConf())); assertIsFile(fs, dstPath); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java index 09304d1ec218d..92286a97f16ee 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java @@ -66,7 +66,7 @@ public class ITestAzureBlobFileSystemCreate extends AbstractAbfsIntegrationTest { private static final Path TEST_FILE_PATH = new Path("testfile"); - private static final Path TEST_FOLDER_PATH = new Path("testFolder"); + private static final String TEST_FOLDER_PATH = "testFolder"; private static final String TEST_CHILD_FILE = "childFile"; public 
ITestAzureBlobFileSystemCreate() throws Exception { @@ -89,13 +89,14 @@ public void testEnsureFileCreatedImmediately() throws Exception { @SuppressWarnings("deprecation") public void testCreateNonRecursive() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - Path testFile = new Path(TEST_FOLDER_PATH, TEST_CHILD_FILE); + Path testFolderPath = getUniquePath(TEST_FOLDER_PATH); + Path testFile = new Path(testFolderPath, TEST_CHILD_FILE); try { fs.createNonRecursive(testFile, true, 1024, (short) 1, 1024, null); fail("Should've thrown"); } catch (FileNotFoundException expected) { } - fs.mkdirs(TEST_FOLDER_PATH); + fs.mkdirs(testFolderPath); fs.createNonRecursive(testFile, true, 1024, (short) 1, 1024, null) .close(); assertIsFile(fs, testFile); @@ -105,13 +106,14 @@ public void testCreateNonRecursive() throws Exception { @SuppressWarnings("deprecation") public void testCreateNonRecursive1() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - Path testFile = new Path(TEST_FOLDER_PATH, TEST_CHILD_FILE); + Path testFolderPath = getUniquePath(TEST_FOLDER_PATH); + Path testFile = new Path(testFolderPath, TEST_CHILD_FILE); try { fs.createNonRecursive(testFile, FsPermission.getDefault(), EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE), 1024, (short) 1, 1024, null); fail("Should've thrown"); } catch (FileNotFoundException expected) { } - fs.mkdirs(TEST_FOLDER_PATH); + fs.mkdirs(testFolderPath); fs.createNonRecursive(testFile, true, 1024, (short) 1, 1024, null) .close(); assertIsFile(fs, testFile); @@ -123,13 +125,14 @@ public void testCreateNonRecursive1() throws Exception { public void testCreateNonRecursive2() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - Path testFile = new Path(TEST_FOLDER_PATH, TEST_CHILD_FILE); + Path testFolderPath = getUniquePath(TEST_FOLDER_PATH); + Path testFile = new Path(testFolderPath, TEST_CHILD_FILE); try { fs.createNonRecursive(testFile, FsPermission.getDefault(), false, 1024, (short) 
1, 1024, null); fail("Should've thrown"); } catch (FileNotFoundException e) { } - fs.mkdirs(TEST_FOLDER_PATH); + fs.mkdirs(testFolderPath); fs.createNonRecursive(testFile, true, 1024, (short) 1, 1024, null) .close(); assertIsFile(fs, testFile); @@ -141,7 +144,8 @@ public void testCreateNonRecursive2() throws Exception { @Test public void testWriteAfterClose() throws Throwable { final AzureBlobFileSystem fs = getFileSystem(); - Path testPath = new Path(TEST_FOLDER_PATH, TEST_CHILD_FILE); + Path testFolderPath = getUniquePath(TEST_FOLDER_PATH); + Path testPath = new Path(testFolderPath, TEST_CHILD_FILE); FSDataOutputStream out = fs.create(testPath); out.close(); intercept(IOException.class, () -> out.write('a')); @@ -161,7 +165,8 @@ public void testWriteAfterClose() throws Throwable { @Test public void testTryWithResources() throws Throwable { final AzureBlobFileSystem fs = getFileSystem(); - Path testPath = new Path(TEST_FOLDER_PATH, TEST_CHILD_FILE); + Path testFolderPath = getUniquePath(TEST_FOLDER_PATH); + Path testPath = new Path(testFolderPath, TEST_CHILD_FILE); try (FSDataOutputStream out = fs.create(testPath)) { out.write('1'); out.hsync(); @@ -194,7 +199,8 @@ public void testTryWithResources() throws Throwable { @Test public void testFilterFSWriteAfterClose() throws Throwable { final AzureBlobFileSystem fs = getFileSystem(); - Path testPath = new Path(TEST_FOLDER_PATH, TEST_CHILD_FILE); + Path testFolderPath = getUniquePath(TEST_FOLDER_PATH); + Path testPath = new Path(testFolderPath, TEST_CHILD_FILE); FSDataOutputStream out = fs.create(testPath); intercept(FileNotFoundException.class, () -> { From c501cd70bf47fe562195c6fa40a0a57c7f98997a Mon Sep 17 00:00:00 2001 From: sumangala Date: Sun, 27 Jun 2021 19:12:03 +0530 Subject: [PATCH 03/11] part 2b: upto rn unicode --- ...ITestAzureBlobFileSystemDelegationSAS.java | 2 +- .../ITestAzureBlobFileSystemDelete.java | 27 ++++++----- .../azurebfs/ITestAzureBlobFileSystemE2E.java | 18 +++---- 
.../ITestAzureBlobFileSystemFileStatus.java | 21 ++++---- .../ITestAzureBlobFileSystemListStatus.java | 23 ++++----- .../ITestAzureBlobFileSystemMkDir.java | 6 +-- .../ITestAzureBlobFileSystemOauth.java | 48 ++++++++++--------- .../ITestAzureBlobFileSystemRandomRead.java | 19 ++++---- .../ITestAzureBlobFileSystemRename.java | 38 ++++++++------- ...ITestAzureBlobFileSystemRenameUnicode.java | 2 +- 10 files changed, 108 insertions(+), 96 deletions(-) diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelegationSAS.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelegationSAS.java index 50ce257b4a844..eb553bed10ec3 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelegationSAS.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelegationSAS.java @@ -395,7 +395,7 @@ public void testProperties() throws Exception { @Test public void testSignatureMask() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - String src = "/testABC/test.xt"; + String src = String.format("/testABC/test%s.xt", UUID.randomUUID()); fs.create(new Path(src)); AbfsRestOperation abfsHttpRestOperation = fs.getAbfsClient() .renamePath(src, "/testABC" + "/abc.txt", null); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelete.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelete.java index 9bd82dbb03df6..c6885dc8bc8e0 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelete.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelete.java @@ -76,12 +76,13 @@ public ITestAzureBlobFileSystemDelete() throws Exception { public void testDeleteRoot() throws 
Exception { final AzureBlobFileSystem fs = getFileSystem(); - fs.mkdirs(new Path("/testFolder0")); - fs.mkdirs(new Path("/testFolder1")); - fs.mkdirs(new Path("/testFolder2")); - touch(new Path("/testFolder1/testfile")); - touch(new Path("/testFolder1/testfile2")); - touch(new Path("/testFolder1/testfile3")); + Path testPath = getUniquePath("/testFolder"); + fs.mkdirs(new Path(testPath + "_0")); + fs.mkdirs(new Path(testPath + "_1")); + fs.mkdirs(new Path(testPath + "_2")); + touch(new Path(testPath + "_1/testfile")); + touch(new Path(testPath + "_1/testfile2")); + touch(new Path(testPath + "_1/testfile3")); Path root = new Path("/"); FileStatus[] ls = fs.listStatus(root); @@ -95,7 +96,7 @@ public void testDeleteRoot() throws Exception { @Test() public void testOpenFileAfterDelete() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - Path testfile = new Path("/testFile"); + Path testfile = getUniquePath("/testFile"); touch(testfile); assertDeleted(fs, testfile, false); @@ -106,7 +107,7 @@ public void testOpenFileAfterDelete() throws Exception { @Test public void testEnsureFileIsDeleted() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - Path testfile = new Path("testfile"); + Path testfile = getUniquePath("testfile"); touch(testfile); assertDeleted(fs, testfile, false); assertPathDoesNotExist(fs, "deleted", testfile); @@ -115,10 +116,10 @@ public void testEnsureFileIsDeleted() throws Exception { @Test public void testDeleteDirectory() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - Path dir = new Path("testfile"); + Path dir = getUniquePath("testfile"); fs.mkdirs(dir); - fs.mkdirs(new Path("testfile/test1")); - fs.mkdirs(new Path("testfile/test1/test2")); + fs.mkdirs(new Path(dir + "/test1")); + fs.mkdirs(new Path(dir + "/test1/test2")); assertDeleted(fs, dir, true); assertPathDoesNotExist(fs, "deleted", dir); @@ -130,8 +131,9 @@ public void testDeleteFirstLevelDirectory() throws Exception { final List> tasks 
= new ArrayList<>(); ExecutorService es = Executors.newFixedThreadPool(10); + Path dir = getUniquePath("/test"); for (int i = 0; i < 1000; i++) { - final Path fileName = new Path("/test/" + i); + final Path fileName = new Path(dir + "/" + i); Callable callable = new Callable() { @Override public Void call() throws Exception { @@ -148,7 +150,6 @@ public Void call() throws Exception { } es.shutdownNow(); - Path dir = new Path("/test"); // first try a non-recursive delete, expect failure intercept(FileAlreadyExistsException.class, () -> fs.delete(dir, false)); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2E.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2E.java index 05c3855f5c89d..1034efcd261e0 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2E.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2E.java @@ -52,14 +52,14 @@ public ITestAzureBlobFileSystemE2E() throws Exception { @Test public void testWriteOneByteToFile() throws Exception { - final Path testFilePath = new Path(methodName.getMethodName()); + final Path testFilePath = getUniquePath(methodName.getMethodName()); testWriteOneByteToFile(testFilePath); } @Test public void testReadWriteBytesToFile() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - final Path testFilePath = new Path(methodName.getMethodName()); + final Path testFilePath = getUniquePath(methodName.getMethodName()); testWriteOneByteToFile(testFilePath); try(FSDataInputStream inputStream = fs.open(testFilePath, TEST_DEFAULT_BUFFER_SIZE)) { @@ -78,7 +78,7 @@ public void testOOBWritesAndReadFail() throws Exception { final byte[] b = new byte[2 * readBufferSize]; new Random().nextBytes(b); - final Path testFilePath = new Path(methodName.getMethodName()); + final Path testFilePath = 
getUniquePath(methodName.getMethodName()); try(FSDataOutputStream writeStream = fs.create(testFilePath)) { writeStream.write(b); writeStream.flush(); @@ -107,7 +107,7 @@ public void testOOBWritesAndReadSucceed() throws Exception { byte[] bytesToRead = new byte[readBufferSize]; final byte[] b = new byte[2 * readBufferSize]; new Random().nextBytes(b); - final Path testFilePath = new Path(methodName.getMethodName()); + final Path testFilePath = getUniquePath(methodName.getMethodName()); try (FSDataOutputStream writeStream = fs.create(testFilePath)) { writeStream.write(b); @@ -130,7 +130,7 @@ public void testOOBWritesAndReadSucceed() throws Exception { @Test public void testWriteWithBufferOffset() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - final Path testFilePath = new Path(methodName.getMethodName()); + final Path testFilePath = getUniquePath(methodName.getMethodName()); final byte[] b = new byte[1024 * 1000]; new Random().nextBytes(b); @@ -151,7 +151,7 @@ public void testWriteWithBufferOffset() throws Exception { @Test public void testReadWriteHeavyBytesToFileWithSmallerChunks() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - final Path testFilePath = new Path(methodName.getMethodName()); + final Path testFilePath = getUniquePath(methodName.getMethodName()); final byte[] writeBuffer = new byte[5 * 1000 * 1024]; new Random().nextBytes(writeBuffer); @@ -171,7 +171,7 @@ public void testReadWriteHeavyBytesToFileWithSmallerChunks() throws Exception { @Test public void testReadWithFileNotFoundException() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - final Path testFilePath = new Path(methodName.getMethodName()); + final Path testFilePath = getUniquePath(methodName.getMethodName()); testWriteOneByteToFile(testFilePath); FSDataInputStream inputStream = fs.open(testFilePath, TEST_DEFAULT_BUFFER_SIZE); @@ -185,7 +185,7 @@ public void testReadWithFileNotFoundException() throws Exception { @Test public void 
testWriteWithFileNotFoundException() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - final Path testFilePath = new Path(methodName.getMethodName()); + final Path testFilePath = getUniquePath(methodName.getMethodName()); FSDataOutputStream stream = fs.create(testFilePath); assertTrue(fs.exists(testFilePath)); @@ -202,7 +202,7 @@ public void testWriteWithFileNotFoundException() throws Exception { @Test public void testFlushWithFileNotFoundException() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - final Path testFilePath = new Path(methodName.getMethodName()); + final Path testFilePath = getUniquePath(methodName.getMethodName()); if (fs.getAbfsStore().isAppendBlobKey(fs.makeQualified(testFilePath).toString())) { return; } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFileStatus.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFileStatus.java index 421fa9a65cc05..0109534963b98 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFileStatus.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFileStatus.java @@ -37,8 +37,8 @@ public class ITestAzureBlobFileSystemFileStatus extends private static final String DEFAULT_UMASK_VALUE = "027"; private static final String FULL_PERMISSION = "777"; - private static final Path TEST_FILE = new Path("testFile"); - private static final Path TEST_FOLDER = new Path("testDir"); + private static final String TEST_FILE = "testFile"; + private static final String TEST_FOLDER = "testDir"; public ITestAzureBlobFileSystemFileStatus() throws Exception { super(); @@ -57,8 +57,9 @@ public void testEnsureStatusWorksForRoot() throws Exception { public void testFileStatusPermissionsAndOwnerAndGroup() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); 
fs.getConf().set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, DEFAULT_UMASK_VALUE); - touch(TEST_FILE); - validateStatus(fs, TEST_FILE, false); + Path testFile = getUniquePath(TEST_FILE); + touch(testFile); + validateStatus(fs, testFile, false); } private FileStatus validateStatus(final AzureBlobFileSystem fs, final Path name, final boolean isDir) @@ -93,18 +94,20 @@ private FileStatus validateStatus(final AzureBlobFileSystem fs, final Path name, public void testFolderStatusPermissionsAndOwnerAndGroup() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); fs.getConf().set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, DEFAULT_UMASK_VALUE); - fs.mkdirs(TEST_FOLDER); + Path testFolder = getUniquePath(TEST_FOLDER); + fs.mkdirs(testFolder); - validateStatus(fs, TEST_FOLDER, true); + validateStatus(fs, testFolder, true); } @Test public void testAbfsPathWithHost() throws IOException { AzureBlobFileSystem fs = this.getFileSystem(); - Path pathWithHost1 = new Path("abfs://mycluster/abfs/file1.txt"); + Path host = getUniquePath("mycluster"); + Path pathWithHost1 = new Path(String.format("abfs://%s/abfs/file1.txt", host)); Path pathwithouthost1 = new Path("/abfs/file1.txt"); - Path pathWithHost2 = new Path("abfs://mycluster/abfs/file2.txt"); + Path pathWithHost2 = new Path(String.format("abfs://%s/abfs/file2.txt", host)); Path pathwithouthost2 = new Path("/abfs/file2.txt"); // verify compatibility of this path format @@ -125,7 +128,7 @@ public void testAbfsPathWithHost() throws IOException { @Test public void testLastModifiedTime() throws IOException { AzureBlobFileSystem fs = this.getFileSystem(); - Path testFilePath = new Path("childfile1.txt"); + Path testFilePath = getUniquePath("childfile1.txt"); long createStartTime = System.currentTimeMillis(); long minCreateStartTime = (createStartTime / 1000) * 1000 - 1; // Dividing and multiplying by 1000 to make last 3 digits 0. 
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemListStatus.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemListStatus.java index 31f92d2bd3890..e7bfe06dbd50b 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemListStatus.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemListStatus.java @@ -94,7 +94,7 @@ public Void call() throws Exception { @Test public void testListFileVsListDir() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - Path path = new Path("/testFile"); + Path path = getUniquePath("/testFile"); try(FSDataOutputStream ignored = fs.create(path)) { FileStatus[] testFiles = fs.listStatus(path); assertEquals("length of test files", 1, testFiles.length); @@ -106,19 +106,20 @@ public void testListFileVsListDir() throws Exception { @Test public void testListFileVsListDir2() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - fs.mkdirs(new Path("/testFolder")); - fs.mkdirs(new Path("/testFolder/testFolder2")); - fs.mkdirs(new Path("/testFolder/testFolder2/testFolder3")); - Path testFile0Path = new Path("/testFolder/testFolder2/testFolder3/testFile"); + Path testFolder = getUniquePath("/testFolder"); + fs.mkdirs(testFolder); + fs.mkdirs(new Path(testFolder + "/testFolder2")); + fs.mkdirs(new Path(testFolder + "/testFolder2/testFolder3")); + Path testFile0Path = new Path( + testFolder + "/testFolder2/testFolder3/testFile"); ContractTestUtils.touch(fs, testFile0Path); FileStatus[] testFiles = fs.listStatus(testFile0Path); assertEquals("Wrong listing size of file " + testFile0Path, 1, testFiles.length); FileStatus file0 = testFiles[0]; - assertEquals("Wrong path for " + file0, - new Path(getTestUrl(), "/testFolder/testFolder2/testFolder3/testFile"), - file0.getPath()); + assertEquals("Wrong path for " + file0, 
new Path(getTestUrl(), + testFolder + "/testFolder2/testFolder3/testFile"), file0.getPath()); assertIsFileReference(file0); } @@ -131,18 +132,18 @@ public void testListNonExistentDir() throws Exception { @Test public void testListFiles() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - Path testDir = new Path("/test"); + Path testDir = getUniquePath("/test"); fs.mkdirs(testDir); FileStatus[] fileStatuses = fs.listStatus(new Path("/")); assertEquals(1, fileStatuses.length); - fs.mkdirs(new Path("/test/sub")); + fs.mkdirs(new Path(testDir + "/sub")); fileStatuses = fs.listStatus(testDir); assertEquals(1, fileStatuses.length); assertEquals("sub", fileStatuses[0].getPath().getName()); assertIsDirectoryReference(fileStatuses[0]); - Path childF = fs.makeQualified(new Path("/test/f")); + Path childF = fs.makeQualified(new Path(testDir + "/f")); touch(childF); fileStatuses = fs.listStatus(testDir); assertEquals(2, fileStatuses.length); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemMkDir.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemMkDir.java index 0db9529326702..48ab2b30b03c9 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemMkDir.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemMkDir.java @@ -48,7 +48,7 @@ public void testCreateDirWithExistingDir() throws Exception { Assume.assumeTrue(DEFAULT_FS_AZURE_ENABLE_MKDIR_OVERWRITE || !getFileSystem() .getIsNamespaceEnabled()); final AzureBlobFileSystem fs = getFileSystem(); - Path path = new Path("testFolder"); + Path path = getUniquePath("testFolder"); assertMkdirs(fs, path); assertMkdirs(fs, path); } @@ -63,7 +63,7 @@ public void testMkdirExistingDirOverwriteFalse() throws Exception { Configuration config = new Configuration(this.getRawConfiguration()); 
config.set(FS_AZURE_ENABLE_MKDIR_OVERWRITE, Boolean.toString(false)); AzureBlobFileSystem fs = getFileSystem(config); - Path path = new Path("testFolder"); + Path path = getUniquePath("testFolder"); assertMkdirs(fs, path); //checks that mkdirs returns true long timeCreated = fs.getFileStatus(path).getModificationTime(); assertMkdirs(fs, path); //call to existing dir should return success @@ -77,7 +77,7 @@ public void createDirWithExistingFilename() throws Exception { DEFAULT_FS_AZURE_ENABLE_MKDIR_OVERWRITE && getFileSystem() .getIsNamespaceEnabled()); final AzureBlobFileSystem fs = getFileSystem(); - Path path = new Path("testFilePath"); + Path path = getUniquePath("testFilePath"); fs.create(path); assertTrue(fs.getFileStatus(path).isFile()); intercept(FileAlreadyExistsException.class, () -> fs.mkdirs(path)); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemOauth.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemOauth.java index e517f685784e7..03538d1d241d1 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemOauth.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemOauth.java @@ -53,8 +53,8 @@ public class ITestAzureBlobFileSystemOauth extends AbstractAbfsIntegrationTest{ private static final Path FILE_PATH = new Path("/testFile"); - private static final Path EXISTED_FILE_PATH = new Path("/existedFile"); - private static final Path EXISTED_FOLDER_PATH = new Path("/existedFolder"); + private static final String EXISTED_FILE_PATH = "/existedFile"; + private static final String EXISTED_FOLDER_PATH = "/existedFolder"; private static final Logger LOG = LoggerFactory.getLogger(ITestAbfsStreamStatistics.class); @@ -71,7 +71,9 @@ public void testBlobDataContributor() throws Exception { String secret = 
this.getConfiguration().get(TestConfigurationKeys.FS_AZURE_BLOB_DATA_CONTRIBUTOR_CLIENT_SECRET); Assume.assumeTrue("Contributor client secret not provided", secret != null); - prepareFiles(); + Path existedFilePath = getUniquePath(EXISTED_FILE_PATH); + Path existedFolderPath = getUniquePath(EXISTED_FOLDER_PATH); + prepareFiles(existedFilePath, existedFolderPath); final AzureBlobFileSystem fs = getBlobConributor(); @@ -89,29 +91,29 @@ public void testBlobDataContributor() throws Exception { // Verify Blob Data Contributor has full access to existed folder, file // READ FOLDER - assertTrue(fs.exists(EXISTED_FOLDER_PATH)); + assertTrue(fs.exists(existedFolderPath)); //DELETE FOLDER - fs.delete(EXISTED_FOLDER_PATH, true); - assertFalse(fs.exists(EXISTED_FOLDER_PATH)); + fs.delete(existedFolderPath, true); + assertFalse(fs.exists(existedFolderPath)); // READ FILE - try (FSDataInputStream stream = fs.open(EXISTED_FILE_PATH)) { + try (FSDataInputStream stream = fs.open(existedFilePath)) { assertTrue(stream.read() != 0); } - assertEquals(0, fs.getFileStatus(EXISTED_FILE_PATH).getLen()); + assertEquals(0, fs.getFileStatus(existedFilePath).getLen()); // WRITE FILE - try (FSDataOutputStream stream = fs.append(EXISTED_FILE_PATH)) { + try (FSDataOutputStream stream = fs.append(existedFilePath)) { stream.write(0); } - assertEquals(1, fs.getFileStatus(EXISTED_FILE_PATH).getLen()); + assertEquals(1, fs.getFileStatus(existedFilePath).getLen()); // REMOVE FILE - fs.delete(EXISTED_FILE_PATH, true); - assertFalse(fs.exists(EXISTED_FILE_PATH)); + fs.delete(existedFilePath, true); + assertFalse(fs.exists(existedFilePath)); } /* @@ -124,7 +126,9 @@ public void testBlobDataReader() throws Exception { String secret = this.getConfiguration().get(TestConfigurationKeys.FS_AZURE_BLOB_DATA_READER_CLIENT_SECRET); Assume.assumeTrue("Reader client secret not provided", secret != null); - prepareFiles(); + Path existedFilePath = getUniquePath(EXISTED_FILE_PATH); + Path existedFolderPath = 
getUniquePath(EXISTED_FOLDER_PATH); + prepareFiles(existedFilePath, existedFolderPath); final AzureBlobFileSystem fs = getBlobReader(); // Use abfsStore in this test to verify the ERROR code in AbfsRestOperationException @@ -132,23 +136,23 @@ public void testBlobDataReader() throws Exception { // TEST READ FS Map properties = abfsStore.getFilesystemProperties(); // TEST READ FOLDER - assertTrue(fs.exists(EXISTED_FOLDER_PATH)); + assertTrue(fs.exists(existedFolderPath)); // TEST DELETE FOLDER try { - abfsStore.delete(EXISTED_FOLDER_PATH, true); + abfsStore.delete(existedFolderPath, true); } catch (AbfsRestOperationException e) { assertEquals(AzureServiceErrorCode.AUTHORIZATION_PERMISSION_MISS_MATCH, e.getErrorCode()); } // TEST READ FILE - try (InputStream inputStream = abfsStore.openFileForRead(EXISTED_FILE_PATH, null)) { + try (InputStream inputStream = abfsStore.openFileForRead(existedFilePath, null)) { assertTrue(inputStream.read() != 0); } // TEST WRITE FILE try { - abfsStore.openFileForWrite(EXISTED_FILE_PATH, fs.getFsStatistics(), true); + abfsStore.openFileForWrite(existedFilePath, fs.getFsStatistics(), true); } catch (AbfsRestOperationException e) { assertEquals(AzureServiceErrorCode.AUTHORIZATION_PERMISSION_MISS_MATCH, e.getErrorCode()); } finally { @@ -157,14 +161,14 @@ public void testBlobDataReader() throws Exception { } - private void prepareFiles() throws IOException { + private void prepareFiles(Path existedFilePath, Path existedFolderPath) throws IOException { // create test files/folders to verify access control diff between // Blob data contributor and Blob data reader final AzureBlobFileSystem fs = this.getFileSystem(); - fs.create(EXISTED_FILE_PATH); - assertTrue(fs.exists(EXISTED_FILE_PATH)); - fs.mkdirs(EXISTED_FOLDER_PATH); - assertTrue(fs.exists(EXISTED_FOLDER_PATH)); + fs.create(existedFilePath); + assertTrue(fs.exists(existedFilePath)); + fs.mkdirs(existedFolderPath); + assertTrue(fs.exists(existedFolderPath)); } private 
AzureBlobFileSystem getBlobConributor() throws Exception { diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRandomRead.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRandomRead.java index ef531acb2bbbc..93036d45f0661 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRandomRead.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRandomRead.java @@ -86,7 +86,7 @@ public ITestAzureBlobFileSystemRandomRead() throws Exception { @Test public void testBasicRead() throws Exception { - Path testPath = new Path(TEST_FILE_PREFIX + "_testBasicRead"); + Path testPath = getUniquePath(TEST_FILE_PREFIX + "_testBasicRead"); assumeHugeFileExists(testPath); try (FSDataInputStream inputStream = this.getFileSystem().open(testPath)) { @@ -115,7 +115,7 @@ public void testBasicRead() throws Exception { public void testRandomRead() throws Exception { Assume.assumeFalse("This test does not support namespace enabled account", this.getFileSystem().getIsNamespaceEnabled()); - Path testPath = new Path(TEST_FILE_PREFIX + "_testRandomRead"); + Path testPath = getUniquePath(TEST_FILE_PREFIX + "_testRandomRead"); assumeHugeFileExists(testPath); try ( @@ -174,7 +174,7 @@ public void testRandomRead() throws Exception { */ @Test public void testSeekToNewSource() throws Exception { - Path testPath = new Path(TEST_FILE_PREFIX + "_testSeekToNewSource"); + Path testPath = getUniquePath(TEST_FILE_PREFIX + "_testSeekToNewSource"); assumeHugeFileExists(testPath); try (FSDataInputStream inputStream = this.getFileSystem().open(testPath)) { @@ -189,7 +189,7 @@ public void testSeekToNewSource() throws Exception { */ @Test public void testSkipBounds() throws Exception { - Path testPath = new Path(TEST_FILE_PREFIX + "_testSkipBounds"); + Path testPath = getUniquePath(TEST_FILE_PREFIX + 
"_testSkipBounds"); long testFileLength = assumeHugeFileExists(testPath); try (FSDataInputStream inputStream = this.getFileSystem().open(testPath)) { @@ -230,7 +230,7 @@ public Long call() throws Exception { */ @Test public void testValidateSeekBounds() throws Exception { - Path testPath = new Path(TEST_FILE_PREFIX + "_testValidateSeekBounds"); + Path testPath = getUniquePath(TEST_FILE_PREFIX + "_testValidateSeekBounds"); long testFileLength = assumeHugeFileExists(testPath); try (FSDataInputStream inputStream = this.getFileSystem().open(testPath)) { @@ -281,7 +281,7 @@ public FSDataInputStream call() throws Exception { */ @Test public void testSeekAndAvailableAndPosition() throws Exception { - Path testPath = new Path(TEST_FILE_PREFIX + "_testSeekAndAvailableAndPosition"); + Path testPath = getUniquePath(TEST_FILE_PREFIX + "_testSeekAndAvailableAndPosition"); long testFileLength = assumeHugeFileExists(testPath); try (FSDataInputStream inputStream = this.getFileSystem().open(testPath)) { @@ -347,7 +347,7 @@ public void testSeekAndAvailableAndPosition() throws Exception { */ @Test public void testSkipAndAvailableAndPosition() throws Exception { - Path testPath = new Path(TEST_FILE_PREFIX + "_testSkipAndAvailableAndPosition"); + Path testPath = getUniquePath(TEST_FILE_PREFIX + "_testSkipAndAvailableAndPosition"); long testFileLength = assumeHugeFileExists(testPath); try (FSDataInputStream inputStream = this.getFileSystem().open(testPath)) { @@ -413,7 +413,8 @@ public void testSkipAndAvailableAndPosition() throws Exception { @Test public void testSequentialReadAfterReverseSeekPerformance() throws Exception { - Path testPath = new Path(TEST_FILE_PREFIX + "_testSequentialReadAfterReverseSeekPerformance"); + Path testPath = getUniquePath(TEST_FILE_PREFIX + + "_testSequentialReadAfterReverseSeekPerformance"); assumeHugeFileExists(testPath); final int maxAttempts = 10; final double maxAcceptableRatio = 1.01; @@ -446,7 +447,7 @@ public void 
testSequentialReadAfterReverseSeekPerformance() public void testRandomReadPerformance() throws Exception { Assume.assumeFalse("This test does not support namespace enabled account", this.getFileSystem().getIsNamespaceEnabled()); - Path testPath = new Path(TEST_FILE_PREFIX + "_testRandomReadPerformance"); + Path testPath = getUniquePath(TEST_FILE_PREFIX + "_testRandomReadPerformance"); assumeHugeFileExists(testPath); final AzureBlobFileSystem abFs = this.getFileSystem(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java index 2adf70ca6457d..c67f2f8d85cdb 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java @@ -95,13 +95,13 @@ public void testRenameWithPreExistingDestination() throws Exception { @Test public void testRenameFileUnderDir() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - Path sourceDir = new Path("/testSrc"); + Path sourceDir = getUniquePath("/testSrc"); assertMkdirs(fs, sourceDir); String filename = "file1"; Path file1 = new Path(sourceDir, filename); touch(file1); - Path destDir = new Path("/testDst"); + Path destDir = getUniquePath("/testDst"); assertRenameOutcome(fs, sourceDir, destDir, true); FileStatus[] fileStatus = fs.listStatus(destDir); assertNotNull("Null file status", fileStatus); @@ -113,14 +113,15 @@ public void testRenameFileUnderDir() throws Exception { @Test public void testRenameDirectory() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - fs.mkdirs(new Path("testDir")); - Path test1 = new Path("testDir/test1"); + Path testDir = getUniquePath("testDir"); + fs.mkdirs(testDir); + Path test1 = new Path(testDir + "/test1"); fs.mkdirs(test1); - 
fs.mkdirs(new Path("testDir/test1/test2")); - fs.mkdirs(new Path("testDir/test1/test2/test3")); + fs.mkdirs(new Path(testDir + "/test1/test2")); + fs.mkdirs(new Path(testDir + "/test1/test2/test3")); assertRenameOutcome(fs, test1, - new Path("testDir/test10"), true); + new Path(testDir + "/test10"), true); assertPathDoesNotExist(fs, "rename source dir", test1); } @@ -130,8 +131,9 @@ public void testRenameFirstLevelDirectory() throws Exception { final List> tasks = new ArrayList<>(); ExecutorService es = Executors.newFixedThreadPool(10); + Path source = getUniquePath("/test"); for (int i = 0; i < 1000; i++) { - final Path fileName = new Path("/test/" + i); + final Path fileName = new Path(source + "/" + i); Callable callable = new Callable() { @Override public Void call() throws Exception { @@ -148,8 +150,7 @@ public Void call() throws Exception { } es.shutdownNow(); - Path source = new Path("/test"); - Path dest = new Path("/renamedDir"); + Path dest = getUniquePath("/renamedDir"); assertRenameOutcome(fs, source, dest, true); FileStatus[] files = fs.listStatus(dest); @@ -173,14 +174,15 @@ public void testRenameRoot() throws Exception { @Test public void testPosixRenameDirectory() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - fs.mkdirs(new Path("testDir2/test1/test2/test3")); - fs.mkdirs(new Path("testDir2/test4")); - Assert.assertTrue(fs.rename(new Path("testDir2/test1/test2/test3"), new Path("testDir2/test4"))); - assertTrue(fs.exists(new Path("testDir2"))); - assertTrue(fs.exists(new Path("testDir2/test1/test2"))); - assertTrue(fs.exists(new Path("testDir2/test4"))); - assertTrue(fs.exists(new Path("testDir2/test4/test3"))); - assertFalse(fs.exists(new Path("testDir2/test1/test2/test3"))); + Path testDir2 = getUniquePath("testDir2"); + fs.mkdirs(new Path(testDir2 + "/test1/test2/test3")); + fs.mkdirs(new Path(testDir2 + "/test4")); + Assert.assertTrue(fs.rename(new Path(testDir2 + "/test1/test2/test3"), new Path(testDir2 + "/test4"))); 
+ assertTrue(fs.exists(testDir2)); + assertTrue(fs.exists(new Path(testDir2 + "/test1/test2"))); + assertTrue(fs.exists(new Path(testDir2 + "/test4"))); + assertTrue(fs.exists(new Path(testDir2 + "/test4/test3"))); + assertFalse(fs.exists(new Path(testDir2 + "/test1/test2/test3"))); } @Test diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRenameUnicode.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRenameUnicode.java index 044c325c8c8dc..1160f038189aa 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRenameUnicode.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRenameUnicode.java @@ -76,7 +76,7 @@ public ITestAzureBlobFileSystemRenameUnicode() throws Exception { @Test public void testRenameFileUsingUnicode() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - Path folderPath1 = new Path(srcDir); + Path folderPath1 = getUniquePath(srcDir); assertMkdirs(fs, folderPath1); assertIsDirectory(fs, folderPath1); Path filePath = new Path(folderPath1 + "/" + filename); From 0ad689edc99e9f25d17faf0df5896d7407ceb12a Mon Sep 17 00:00:00 2001 From: sumangala Date: Mon, 28 Jun 2021 11:08:08 +0530 Subject: [PATCH 04/11] complete --- .../fs/azurebfs/ITestCustomerProvidedKey.java | 51 ++++++++++++------- .../azurebfs/ITestFileSystemProperties.java | 32 ++++++++---- .../azurebfs/ITestWasbAbfsCompatibility.java | 20 +++++--- 3 files changed, 65 insertions(+), 38 deletions(-) diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestCustomerProvidedKey.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestCustomerProvidedKey.java index 11165c8ceb723..9c5c39a72c3de 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestCustomerProvidedKey.java +++ 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestCustomerProvidedKey.java @@ -33,6 +33,7 @@ import java.util.Map; import java.util.Optional; import java.util.Random; +import java.util.UUID; import org.apache.hadoop.fs.contract.ContractTestUtils; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; @@ -106,7 +107,7 @@ public ITestCustomerProvidedKey() throws Exception { @Test public void testReadWithCPK() throws Exception { final AzureBlobFileSystem fs = getAbfs(true); - String fileName = "/" + methodName.getMethodName(); + String fileName = getUniquePath("/" + methodName.getMethodName()).toString(); createFileAndGetContent(fs, fileName, FILE_SIZE); AbfsClient abfsClient = fs.getAbfsClient(); @@ -154,7 +155,7 @@ public void testReadWithCPK() throws Exception { @Test public void testReadWithoutCPK() throws Exception { final AzureBlobFileSystem fs = getAbfs(false); - String fileName = "/" + methodName.getMethodName(); + String fileName = getUniquePath("/" + methodName.getMethodName()).toString(); createFileAndGetContent(fs, fileName, FILE_SIZE); AbfsClient abfsClient = fs.getAbfsClient(); @@ -190,7 +191,7 @@ public void testReadWithoutCPK() throws Exception { @Test public void testAppendWithCPK() throws Exception { final AzureBlobFileSystem fs = getAbfs(true); - final String fileName = "/" + methodName.getMethodName(); + final String fileName = getUniquePath("/" + methodName.getMethodName()).toString(); createFileAndGetContent(fs, fileName, FILE_SIZE); // Trying to append with correct CPK headers @@ -233,7 +234,7 @@ public void testAppendWithCPK() throws Exception { @Test public void testAppendWithoutCPK() throws Exception { final AzureBlobFileSystem fs = getAbfs(false); - final String fileName = "/" + methodName.getMethodName(); + final String fileName = getUniquePath("/" + methodName.getMethodName()).toString(); createFileAndGetContent(fs, fileName, FILE_SIZE); // Trying to append without CPK headers @@ -267,7 +268,7 
@@ public void testAppendWithoutCPK() throws Exception { @Test public void testSetGetXAttr() throws Exception { final AzureBlobFileSystem fs = getAbfs(true); - String fileName = methodName.getMethodName(); + final String fileName = getUniquePath(methodName.getMethodName()).toString(); createFileAndGetContent(fs, fileName, FILE_SIZE); String valSent = "testValue"; @@ -315,7 +316,8 @@ public void testCopyBetweenAccounts() throws Exception { AzureBlobFileSystem fs1 = getAbfs(true); int fileSize = FILE_SIZE_FOR_COPY_BETWEEN_ACCOUNTS; byte[] fileContent = getRandomBytesArray(fileSize); - Path testFilePath = createFileWithContent(fs1, "fs1-file.txt", fileContent); + Path testFilePath = createFileWithContent(fs1, + String.format("fs1-file%s.txt", UUID.randomUUID()), fileContent); // Create fs2 with different CPK Configuration conf = new Configuration(); @@ -330,7 +332,8 @@ public void testCopyBetweenAccounts() throws Exception { AzureBlobFileSystem fs2 = (AzureBlobFileSystem) FileSystem.newInstance(conf); // Read from fs1 and write to fs2, fs1 and fs2 are having different CPK - Path fs2DestFilePath = new Path("fs2-dest-file.txt"); + Path fs2DestFilePath = new Path( + String.format("fs2-dest-file%s.txt", UUID.randomUUID())); FSDataOutputStream ops = fs2.create(fs2DestFilePath); try (FSDataInputStream iStream = fs1.open(testFilePath)) { long totalBytesRead = 0; @@ -398,8 +401,8 @@ public void testListPathWithoutCPK() throws Exception { private void testListPath(final boolean isWithCPK) throws Exception { final AzureBlobFileSystem fs = getAbfs(isWithCPK); - String testDirName = "/" + methodName.getMethodName(); - final Path testPath = new Path(testDirName); + final Path testPath = getUniquePath("/" + methodName.getMethodName()); + String testDirName = testPath.toString(); fs.mkdirs(testPath); createFileAndGetContent(fs, testDirName + "/aaa", FILE_SIZE); createFileAndGetContent(fs, testDirName + "/bbb", FILE_SIZE); @@ -455,7 +458,8 @@ public void testCreatePathWithoutCPK() 
throws Exception { private void testCreatePath(final boolean isWithCPK) throws Exception { final AzureBlobFileSystem fs = getAbfs(isWithCPK); - final String testFileName = "/" + methodName.getMethodName(); + final String testFileName = getUniquePath("/" + methodName.getMethodName()) + .toString(); createFileAndGetContent(fs, testFileName, FILE_SIZE); AbfsClient abfsClient = fs.getAbfsClient(); @@ -496,7 +500,8 @@ public void testRenamePathWithoutCPK() throws Exception { private void testRenamePath(final boolean isWithCPK) throws Exception { final AzureBlobFileSystem fs = getAbfs(isWithCPK); - final String testFileName = "/" + methodName.getMethodName(); + final String testFileName = getUniquePath("/" + methodName.getMethodName()) + .toString(); createFileAndGetContent(fs, testFileName, FILE_SIZE); FileStatus fileStatusBeforeRename = fs @@ -530,7 +535,8 @@ public void testFlushWithoutCPK() throws Exception { private void testFlush(final boolean isWithCPK) throws Exception { final AzureBlobFileSystem fs = getAbfs(isWithCPK); - final String testFileName = "/" + methodName.getMethodName(); + final String testFileName = getUniquePath("/" + methodName.getMethodName()) + .toString(); fs.create(new Path(testFileName)); AbfsClient abfsClient = fs.getAbfsClient(); String expectedCPKSha = getCPKSha(fs); @@ -586,7 +592,8 @@ public void testSetPathPropertiesWithoutCPK() throws Exception { private void testSetPathProperties(final boolean isWithCPK) throws Exception { final AzureBlobFileSystem fs = getAbfs(isWithCPK); - final String testFileName = "/" + methodName.getMethodName(); + final String testFileName = getUniquePath("/" + methodName.getMethodName()) + .toString(); createFileAndGetContent(fs, testFileName, FILE_SIZE); AbfsClient abfsClient = fs.getAbfsClient(); @@ -615,7 +622,8 @@ public void testGetPathStatusFileWithoutCPK() throws Exception { private void testGetPathStatusFile(final boolean isWithCPK) throws Exception { final AzureBlobFileSystem fs = getAbfs(isWithCPK); 
- final String testFileName = "/" + methodName.getMethodName(); + final String testFileName = getUniquePath("/" + methodName.getMethodName()) + .toString(); createFileAndGetContent(fs, testFileName, FILE_SIZE); AbfsClient abfsClient = fs.getAbfsClient(); @@ -651,7 +659,8 @@ public void testDeletePathWithoutCPK() throws Exception { private void testDeletePath(final boolean isWithCPK) throws Exception { final AzureBlobFileSystem fs = getAbfs(isWithCPK); - final String testFileName = "/" + methodName.getMethodName(); + final String testFileName = getUniquePath("/" + methodName.getMethodName()) + .toString(); createFileAndGetContent(fs, testFileName, FILE_SIZE); FileStatus[] listStatuses = fs.listStatus(new Path(testFileName)); @@ -680,7 +689,8 @@ public void testSetPermissionWithoutCPK() throws Exception { private void testSetPermission(final boolean isWithCPK) throws Exception { final AzureBlobFileSystem fs = getAbfs(isWithCPK); - final String testFileName = "/" + methodName.getMethodName(); + final String testFileName = getUniquePath("/" + methodName.getMethodName()) + .toString(); Assume.assumeTrue(fs.getIsNamespaceEnabled()); createFileAndGetContent(fs, testFileName, FILE_SIZE); AbfsClient abfsClient = fs.getAbfsClient(); @@ -704,7 +714,8 @@ public void testSetAclWithoutCPK() throws Exception { private void testSetAcl(final boolean isWithCPK) throws Exception { final AzureBlobFileSystem fs = getAbfs(isWithCPK); - final String testFileName = "/" + methodName.getMethodName(); + final String testFileName = getUniquePath("/" + methodName.getMethodName()) + .toString(); Assume.assumeTrue(fs.getIsNamespaceEnabled()); createFileAndGetContent(fs, testFileName, FILE_SIZE); AbfsClient abfsClient = fs.getAbfsClient(); @@ -731,7 +742,8 @@ public void testGetAclWithoutCPK() throws Exception { private void testGetAcl(final boolean isWithCPK) throws Exception { final AzureBlobFileSystem fs = getAbfs(isWithCPK); - final String testFileName = "/" + methodName.getMethodName(); + 
final String testFileName = getUniquePath("/" + methodName.getMethodName()) + .toString(); Assume.assumeTrue(fs.getIsNamespaceEnabled()); createFileAndGetContent(fs, testFileName, FILE_SIZE); AbfsClient abfsClient = fs.getAbfsClient(); @@ -759,7 +771,8 @@ private void testCheckAccess(final boolean isWithCPK) throws Exception { getAuthType() == AuthType.OAuth); final AzureBlobFileSystem fs = getAbfs(isWithCPK); - final String testFileName = "/" + methodName.getMethodName(); + final String testFileName = getUniquePath("/" + methodName.getMethodName()) + .toString(); fs.create(new Path(testFileName)); AbfsClient abfsClient = fs.getAbfsClient(); AbfsRestOperation abfsRestOperation = abfsClient diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestFileSystemProperties.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestFileSystemProperties.java index ba9b639adb602..659f34ae5ff25 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestFileSystemProperties.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestFileSystemProperties.java @@ -32,16 +32,22 @@ */ public class ITestFileSystemProperties extends AbstractAbfsIntegrationTest { private static final int TEST_DATA = 100; - private static final Path TEST_PATH = new Path("/testfile"); + private static final String TEST_PATH = "/testfile"; public ITestFileSystemProperties() throws Exception { } @Test public void testReadWriteBytesToFileAndEnsureThreadPoolCleanup() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - testWriteOneByteToFileAndEnsureThreadPoolCleanup(); + Path testPath = getUniquePath(TEST_PATH); + try(FSDataOutputStream stream = fs.create(testPath)) { + stream.write(TEST_DATA); + } + + FileStatus fileStatus = fs.getFileStatus(testPath); + assertEquals(1, fileStatus.getLen()); - try(FSDataInputStream inputStream = fs.open(TEST_PATH, 4 * 1024 * 1024)) { + 
try(FSDataInputStream inputStream = fs.open(testPath, 4 * 1024 * 1024)) { int i = inputStream.read(); assertEquals(TEST_DATA, i); } @@ -50,11 +56,12 @@ public void testReadWriteBytesToFileAndEnsureThreadPoolCleanup() throws Exceptio @Test public void testWriteOneByteToFileAndEnsureThreadPoolCleanup() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - try(FSDataOutputStream stream = fs.create(TEST_PATH)) { + Path testPath = getUniquePath(TEST_PATH); + try(FSDataOutputStream stream = fs.create(testPath)) { stream.write(TEST_DATA); } - FileStatus fileStatus = fs.getFileStatus(TEST_PATH); + FileStatus fileStatus = fs.getFileStatus(testPath); assertEquals(1, fileStatus.getLen()); } @@ -75,10 +82,11 @@ public void testBase64PathProperties() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); final Hashtable properties = new Hashtable<>(); properties.put("key", "{ value: valueTest }"); - touch(TEST_PATH); - fs.getAbfsStore().setPathProperties(TEST_PATH, properties); + Path testPath = getUniquePath(TEST_PATH); + touch(testPath); + fs.getAbfsStore().setPathProperties(testPath, properties); Hashtable fetchedProperties = - fs.getAbfsStore().getPathStatus(TEST_PATH); + fs.getAbfsStore().getPathStatus(testPath); assertEquals(properties, fetchedProperties); } @@ -99,9 +107,11 @@ public void testBase64InvalidPathProperties() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); final Hashtable properties = new Hashtable<>(); properties.put("key", "{ value: valueTest兩 }"); - touch(TEST_PATH); - fs.getAbfsStore().setPathProperties(TEST_PATH, properties); - Hashtable fetchedProperties = fs.getAbfsStore().getPathStatus(TEST_PATH); + Path testPath = getUniquePath(TEST_PATH); + touch(testPath); + fs.getAbfsStore().setPathProperties(testPath, properties); + Hashtable fetchedProperties = fs.getAbfsStore() + .getPathStatus(testPath); assertEquals(properties, fetchedProperties); } diff --git 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestWasbAbfsCompatibility.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestWasbAbfsCompatibility.java index 609512d9c0478..9ab56b7a11f41 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestWasbAbfsCompatibility.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestWasbAbfsCompatibility.java @@ -62,7 +62,8 @@ public void testListFileStatus() throws Exception { NativeAzureFileSystem wasb = getWasbFileSystem(); - Path path1 = new Path("/testfiles/~12/!008/3/abFsTestfile"); + Path testFiles = getUniquePath("/testfiles"); + Path path1 = new Path(testFiles + "/~12/!008/3/abFsTestfile"); try(FSDataOutputStream abfsStream = fs.create(path1, true)) { abfsStream.write(ABFS_TEST_CONTEXT.getBytes()); abfsStream.flush(); @@ -70,7 +71,7 @@ public void testListFileStatus() throws Exception { } // create file using wasb - Path path2 = new Path("/testfiles/~12/!008/3/nativeFsTestfile"); + Path path2 = new Path(testFiles + "/~12/!008/3/nativeFsTestfile"); LOG.info("{}", wasb.getUri()); try(FSDataOutputStream nativeFsStream = wasb.create(path2, true)) { nativeFsStream.write(WASB_TEST_CONTEXT.getBytes()); @@ -78,8 +79,8 @@ public void testListFileStatus() throws Exception { nativeFsStream.hsync(); } // list file using abfs and wasb - FileStatus[] abfsFileStatus = fs.listStatus(new Path("/testfiles/~12/!008/3/")); - FileStatus[] nativeFsFileStatus = wasb.listStatus(new Path("/testfiles/~12/!008/3/")); + FileStatus[] abfsFileStatus = fs.listStatus(new Path(testFiles + "/~12/!008/3/")); + FileStatus[] nativeFsFileStatus = wasb.listStatus(new Path(testFiles + "/~12/!008/3/")); assertEquals(2, abfsFileStatus.length); assertEquals(2, nativeFsFileStatus.length); @@ -97,8 +98,9 @@ public void testReadFile() throws Exception { NativeAzureFileSystem wasb = getWasbFileSystem(); + Path testFile = 
getUniquePath("/testReadFile"); for (int i = 0; i< 4; i++) { - Path path = new Path("/testReadFile/~12/!008/testfile" + i); + Path path = new Path(testFile + "/~12/!008/testfile" + i); final FileSystem createFs = createFileWithAbfs[i] ? abfs : wasb; // Write @@ -137,8 +139,9 @@ public void testDir() throws Exception { NativeAzureFileSystem wasb = getWasbFileSystem(); + Path testDir = getUniquePath("/testDir"); for (int i = 0; i < 4; i++) { - Path path = new Path("/testDir/t" + i); + Path path = new Path(testDir + "/t" + i); //create final FileSystem createFs = createDirWithAbfs[i] ? abfs : wasb; assertTrue(createFs.mkdirs(path)); @@ -172,11 +175,12 @@ public void testSetWorkingDirectory() throws Exception { NativeAzureFileSystem wasb = getWasbFileSystem(); - Path d1d4 = new Path("/d1/d2/d3/d4"); + Path d1 = getUniquePath("/d1"); + Path d1d4 = new Path(d1 + "/d2/d3/d4"); assertMkdirs(abfs, d1d4); //set working directory to path1 - Path path1 = new Path("/d1/d2"); + Path path1 = new Path(d1 + "/d2"); wasb.setWorkingDirectory(path1); abfs.setWorkingDirectory(path1); assertEquals(path1, wasb.getWorkingDirectory()); From b9977b30700db14e420c44d14deafda4530ce614 Mon Sep 17 00:00:00 2001 From: sumangala Date: Mon, 5 Jul 2021 15:10:23 +0530 Subject: [PATCH 05/11] chkstyle --- .../fs/azurebfs/ITestAzureBlobFileSystemRandomRead.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRandomRead.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRandomRead.java index 2b933c924a773..fa8786953a230 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRandomRead.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRandomRead.java @@ -413,8 +413,8 @@ public void testSkipAndAvailableAndPosition() throws Exception { 
@Test public void testSequentialReadAfterReverseSeekPerformance() throws Exception { - Path testPath = getUniquePath(TEST_FILE_PREFIX + - "_testSequentialReadAfterReverseSeekPerformance"); + Path testPath = getUniquePath( + TEST_FILE_PREFIX + "_testSequentialReadAfterReverseSeekPerformance"); assumeHugeFileExists(testPath); final int maxAttempts = 10; final double maxAcceptableRatio = 1.01; From 95b5e857910cf455e8fa2937c8a8ba2da6e324fe Mon Sep 17 00:00:00 2001 From: sumangala Date: Wed, 14 Jul 2021 05:53:33 +0530 Subject: [PATCH 06/11] use path() with unique filename --- .../azurebfs/AbstractAbfsIntegrationTest.java | 5 ++- .../hadoop/fs/azurebfs/ITestAbfsClient.java | 4 +-- .../ITestAbfsListStatusRemoteIterator.java | 4 +-- .../azurebfs/ITestAbfsReadWriteAndSeek.java | 4 +-- .../azurebfs/ITestAbfsStreamStatistics.java | 4 +-- .../ITestAzureBlobFileSystemAppend.java | 10 +++--- .../ITestAzureBlobFileSystemBackCompat.java | 2 +- .../ITestAzureBlobFileSystemCheckAccess.java | 2 +- .../ITestAzureBlobFileSystemCopy.java | 2 +- .../ITestAzureBlobFileSystemCreate.java | 12 +++---- .../ITestAzureBlobFileSystemDelete.java | 10 +++--- .../azurebfs/ITestAzureBlobFileSystemE2E.java | 18 +++++------ .../ITestAzureBlobFileSystemFileStatus.java | 11 +++---- .../ITestAzureBlobFileSystemListStatus.java | 14 ++++---- .../ITestAzureBlobFileSystemMkDir.java | 6 ++-- .../ITestAzureBlobFileSystemOauth.java | 8 ++--- .../ITestAzureBlobFileSystemRandomRead.java | 18 +++++------ .../ITestAzureBlobFileSystemRename.java | 12 +++---- ...ITestAzureBlobFileSystemRenameUnicode.java | 2 +- .../fs/azurebfs/ITestCustomerProvidedKey.java | 32 +++++++++---------- .../azurebfs/ITestFileSystemProperties.java | 8 ++--- .../azurebfs/ITestWasbAbfsCompatibility.java | 8 ++--- .../services/ITestAbfsOutputStream.java | 4 +-- .../services/TestAbfsInputStream.java | 4 +-- 24 files changed, 103 insertions(+), 101 deletions(-) diff --git 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java index 5c1f4a634f815..238b6d39f586f 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java @@ -442,7 +442,7 @@ public Path makeQualified(Path path) throws java.io.IOException { */ protected Path path(String filepath) throws IOException { return getFileSystem().makeQualified( - new Path(getTestPath(), filepath)); + new Path(getTestPath(), getUniquePath(filepath))); } /** @@ -451,6 +451,9 @@ protected Path path(String filepath) throws IOException { * @return unique path created from filepath and a GUID */ protected Path getUniquePath(String filepath) { + if (filepath.equals("/")) { + return new Path(filepath); + } return new Path(filepath + StringUtils .right(UUID.randomUUID().toString(), SHORTENED_GUID_LEN)); } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsClient.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsClient.java index f90d410343532..1e3326693dbc7 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsClient.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsClient.java @@ -93,7 +93,7 @@ public void testUnknownHost() throws Exception { public void testListPathWithValidListMaxResultsValues() throws IOException, ExecutionException, InterruptedException { final int fileCount = 10; - final Path directory = getUniquePath("testWithValidListMaxResultsValues"); + final Path directory = path("testWithValidListMaxResultsValues"); createDirectoryWithNFiles(directory, fileCount); final int[] testData = {fileCount + 100, fileCount + 1, 
fileCount, fileCount - 1, 1}; @@ -114,7 +114,7 @@ public void testListPathWithValidListMaxResultsValues() public void testListPathWithValueGreaterThanServerMaximum() throws IOException, ExecutionException, InterruptedException { setListMaxResults(LIST_MAX_RESULTS_SERVER + 100); - final Path directory = getUniquePath( + final Path directory = path( "testWithValueGreaterThanServerMaximum"); createDirectoryWithNFiles(directory, LIST_MAX_RESULTS_SERVER + 200); Assertions.assertThat(listPath(directory.toString())).describedAs( diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsListStatusRemoteIterator.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsListStatusRemoteIterator.java index 33997325ef2dd..9e81a0127b6cb 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsListStatusRemoteIterator.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsListStatusRemoteIterator.java @@ -237,7 +237,7 @@ public void testHasNextForEmptyDir() throws Exception { @Test public void testHasNextForFile() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - Path testFile = getUniquePath("testFile"); + Path testFile = path("testFile"); String testFileName = testFile.toString(); getFileSystem().create(testFile); setPageSize(10); @@ -304,7 +304,7 @@ public String listStatus(Path path, String startFrom, private Path createTestDirectory() throws IOException { String testDirectoryName = "testDirectory" + System.currentTimeMillis(); - Path testDirectory = getUniquePath(testDirectoryName); + Path testDirectory = path(testDirectoryName); getFileSystem().mkdirs(testDirectory); return testDirectory; } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsReadWriteAndSeek.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsReadWriteAndSeek.java index 
7b0eb917ec35d..b0e82444afb34 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsReadWriteAndSeek.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsReadWriteAndSeek.java @@ -75,7 +75,7 @@ private void testReadWriteAndSeek(int bufferSize) throws Exception { final byte[] b = new byte[2 * bufferSize]; new Random().nextBytes(b); - Path testPath = getUniquePath(TEST_PATH); + Path testPath = path(TEST_PATH); try (FSDataOutputStream stream = fs.create(testPath)) { stream.write(b); } @@ -113,7 +113,7 @@ public void testReadAheadRequestID() throws java.io.IOException { final byte[] b = new byte[bufferSize * 10]; new Random().nextBytes(b); - Path testPath = getUniquePath(TEST_PATH); + Path testPath = path(TEST_PATH); try (FSDataOutputStream stream = fs.create(testPath)) { ((AbfsOutputStream) stream.getWrappedStream()).registerListener( new TracingHeaderValidator(abfsConfiguration.getClientCorrelationId(), diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStreamStatistics.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStreamStatistics.java index 9104657730346..e5f182df2a1a2 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStreamStatistics.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStreamStatistics.java @@ -52,8 +52,8 @@ public void testAbfsStreamOps() throws Exception { + "Abfs"); final AzureBlobFileSystem fs = getFileSystem(); - Path smallOperationsFile = getUniquePath("testOneReadWriteOps"); - Path largeOperationsFile = getUniquePath("testLargeReadWriteOps"); + Path smallOperationsFile = path("testOneReadWriteOps"); + Path largeOperationsFile = path("testLargeReadWriteOps"); FileSystem.Statistics statistics = fs.getFsStatistics(); String testReadWriteOps = "test this"; statistics.reset(); diff --git 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAppend.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAppend.java index d650ed1c6fb3f..57f33a43c42e2 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAppend.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAppend.java @@ -45,7 +45,7 @@ public ITestAzureBlobFileSystemAppend() throws Exception { @Test(expected = FileNotFoundException.class) public void testAppendDirShouldFail() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - final Path filePath = getUniquePath(TEST_FILE_PATH); + final Path filePath = path(TEST_FILE_PATH); fs.mkdirs(filePath); fs.append(filePath, 0); } @@ -53,7 +53,7 @@ public void testAppendDirShouldFail() throws Exception { @Test public void testAppendWithLength0() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - try(FSDataOutputStream stream = fs.create(getUniquePath(TEST_FILE_PATH))) { + try(FSDataOutputStream stream = fs.create(path(TEST_FILE_PATH))) { final byte[] b = new byte[1024]; new Random().nextBytes(b); stream.write(b, 1000, 0); @@ -65,7 +65,7 @@ public void testAppendWithLength0() throws Exception { @Test(expected = FileNotFoundException.class) public void testAppendFileAfterDelete() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - final Path filePath = getUniquePath(TEST_FILE_PATH); + final Path filePath = path(TEST_FILE_PATH); ContractTestUtils.touch(fs, filePath); fs.delete(filePath, false); @@ -75,7 +75,7 @@ public void testAppendFileAfterDelete() throws Exception { @Test(expected = FileNotFoundException.class) public void testAppendDirectory() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - final Path folderPath = getUniquePath(TEST_FOLDER_PATH); + final Path folderPath = 
path(TEST_FOLDER_PATH); fs.mkdirs(folderPath); fs.append(folderPath); } @@ -83,7 +83,7 @@ public void testAppendDirectory() throws Exception { @Test public void testTracingForAppend() throws IOException { AzureBlobFileSystem fs = getFileSystem(); - Path testPath = getUniquePath(TEST_FILE_PATH); + Path testPath = path(TEST_FILE_PATH); fs.create(testPath); fs.registerListener(new TracingHeaderValidator( fs.getAbfsStore().getAbfsConfiguration().getClientCorrelationId(), diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemBackCompat.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemBackCompat.java index 2941b96fefa2e..b77e5012da7dd 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemBackCompat.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemBackCompat.java @@ -50,7 +50,7 @@ public void testBlobBackCompat() throws Exception { CloudBlobContainer container = blobClient.getContainerReference(this.getFileSystemName()); container.createIfNotExists(); - Path testPath = getUniquePath("test"); + Path testPath = path("test"); CloudBlockBlob blockBlob = container .getBlockBlobReference(testPath + "/10/10/10"); blockBlob.uploadText(""); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCheckAccess.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCheckAccess.java index 9efe59b523581..fa05b77c61d71 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCheckAccess.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCheckAccess.java @@ -352,7 +352,7 @@ private void modifyAcl(Path file, String uid, FsAction fsAction) private Path 
setupTestDirectoryAndUserAccess(String testFileName, FsAction fsAction) throws Exception { - Path testPath = getUniquePath(TEST_FOLDER_PATH); + Path testPath = path(TEST_FOLDER_PATH); Path file = new Path(testPath + testFileName); file = this.superUserFs.makeQualified(file); this.superUserFs.delete(file, true); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCopy.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCopy.java index e0078fb0ff7b8..aabaf82b622a8 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCopy.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCopy.java @@ -53,7 +53,7 @@ public void testCopyFromLocalFileSystem() throws Exception { localFs.delete(localFilePath, true); try { writeString(localFs, localFilePath, "Testing"); - Path dstPath = getUniquePath("copiedFromLocal"); + Path dstPath = path("copiedFromLocal"); assertTrue(FileUtil.copy(localFs, localFilePath, fs, dstPath, false, fs.getConf())); assertIsFile(fs, dstPath); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java index 87f73a2868b7b..2f23ac5c5c708 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java @@ -92,7 +92,7 @@ public void testEnsureFileCreatedImmediately() throws Exception { @SuppressWarnings("deprecation") public void testCreateNonRecursive() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - Path testFolderPath = getUniquePath(TEST_FOLDER_PATH); + Path testFolderPath = path(TEST_FOLDER_PATH); Path 
testFile = new Path(testFolderPath, TEST_CHILD_FILE); try { fs.createNonRecursive(testFile, true, 1024, (short) 1, 1024, null); @@ -114,7 +114,7 @@ public void testCreateNonRecursive() throws Exception { @SuppressWarnings("deprecation") public void testCreateNonRecursive1() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - Path testFolderPath = getUniquePath(TEST_FOLDER_PATH); + Path testFolderPath = path(TEST_FOLDER_PATH); Path testFile = new Path(testFolderPath, TEST_CHILD_FILE); try { fs.createNonRecursive(testFile, FsPermission.getDefault(), EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE), 1024, (short) 1, 1024, null); @@ -133,7 +133,7 @@ public void testCreateNonRecursive1() throws Exception { public void testCreateNonRecursive2() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - Path testFolderPath = getUniquePath(TEST_FOLDER_PATH); + Path testFolderPath = path(TEST_FOLDER_PATH); Path testFile = new Path(testFolderPath, TEST_CHILD_FILE); try { fs.createNonRecursive(testFile, FsPermission.getDefault(), false, 1024, (short) 1, 1024, null); @@ -152,7 +152,7 @@ public void testCreateNonRecursive2() throws Exception { @Test public void testWriteAfterClose() throws Throwable { final AzureBlobFileSystem fs = getFileSystem(); - Path testFolderPath = getUniquePath(TEST_FOLDER_PATH); + Path testFolderPath = path(TEST_FOLDER_PATH); Path testPath = new Path(testFolderPath, TEST_CHILD_FILE); FSDataOutputStream out = fs.create(testPath); out.close(); @@ -173,7 +173,7 @@ public void testWriteAfterClose() throws Throwable { @Test public void testTryWithResources() throws Throwable { final AzureBlobFileSystem fs = getFileSystem(); - Path testFolderPath = getUniquePath(TEST_FOLDER_PATH); + Path testFolderPath = path(TEST_FOLDER_PATH); Path testPath = new Path(testFolderPath, TEST_CHILD_FILE); try (FSDataOutputStream out = fs.create(testPath)) { out.write('1'); @@ -207,7 +207,7 @@ public void testTryWithResources() throws Throwable { 
@Test public void testFilterFSWriteAfterClose() throws Throwable { final AzureBlobFileSystem fs = getFileSystem(); - Path testFolderPath = getUniquePath(TEST_FOLDER_PATH); + Path testFolderPath = path(TEST_FOLDER_PATH); Path testPath = new Path(testFolderPath, TEST_CHILD_FILE); FSDataOutputStream out = fs.create(testPath); intercept(FileNotFoundException.class, diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelete.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelete.java index 62da748dc6aa8..b9b846f52d2dd 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelete.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelete.java @@ -79,7 +79,7 @@ public ITestAzureBlobFileSystemDelete() throws Exception { public void testDeleteRoot() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - Path testPath = getUniquePath("/testFolder"); + Path testPath = path("/testFolder"); fs.mkdirs(new Path(testPath + "_0")); fs.mkdirs(new Path(testPath + "_1")); fs.mkdirs(new Path(testPath + "_2")); @@ -99,7 +99,7 @@ public void testDeleteRoot() throws Exception { @Test() public void testOpenFileAfterDelete() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - Path testfile = getUniquePath("/testFile"); + Path testfile = path("/testFile"); touch(testfile); assertDeleted(fs, testfile, false); @@ -110,7 +110,7 @@ public void testOpenFileAfterDelete() throws Exception { @Test public void testEnsureFileIsDeleted() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - Path testfile = getUniquePath("testfile"); + Path testfile = path("testfile"); touch(testfile); assertDeleted(fs, testfile, false); assertPathDoesNotExist(fs, "deleted", testfile); @@ -119,7 +119,7 @@ public void testEnsureFileIsDeleted() throws Exception { @Test 
public void testDeleteDirectory() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - Path dir = getUniquePath("testfile"); + Path dir = path("testfile"); fs.mkdirs(dir); fs.mkdirs(new Path(dir + "/test1")); fs.mkdirs(new Path(dir + "/test1/test2")); @@ -134,7 +134,7 @@ public void testDeleteFirstLevelDirectory() throws Exception { final List> tasks = new ArrayList<>(); ExecutorService es = Executors.newFixedThreadPool(10); - Path dir = getUniquePath("/test"); + Path dir = path("/test"); for (int i = 0; i < 1000; i++) { final Path fileName = new Path(dir + "/" + i); Callable callable = new Callable() { diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2E.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2E.java index 1034efcd261e0..4c1653a8c13a8 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2E.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2E.java @@ -52,14 +52,14 @@ public ITestAzureBlobFileSystemE2E() throws Exception { @Test public void testWriteOneByteToFile() throws Exception { - final Path testFilePath = getUniquePath(methodName.getMethodName()); + final Path testFilePath = path(methodName.getMethodName()); testWriteOneByteToFile(testFilePath); } @Test public void testReadWriteBytesToFile() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - final Path testFilePath = getUniquePath(methodName.getMethodName()); + final Path testFilePath = path(methodName.getMethodName()); testWriteOneByteToFile(testFilePath); try(FSDataInputStream inputStream = fs.open(testFilePath, TEST_DEFAULT_BUFFER_SIZE)) { @@ -78,7 +78,7 @@ public void testOOBWritesAndReadFail() throws Exception { final byte[] b = new byte[2 * readBufferSize]; new Random().nextBytes(b); - final Path testFilePath = 
getUniquePath(methodName.getMethodName()); + final Path testFilePath = path(methodName.getMethodName()); try(FSDataOutputStream writeStream = fs.create(testFilePath)) { writeStream.write(b); writeStream.flush(); @@ -107,7 +107,7 @@ public void testOOBWritesAndReadSucceed() throws Exception { byte[] bytesToRead = new byte[readBufferSize]; final byte[] b = new byte[2 * readBufferSize]; new Random().nextBytes(b); - final Path testFilePath = getUniquePath(methodName.getMethodName()); + final Path testFilePath = path(methodName.getMethodName()); try (FSDataOutputStream writeStream = fs.create(testFilePath)) { writeStream.write(b); @@ -130,7 +130,7 @@ public void testOOBWritesAndReadSucceed() throws Exception { @Test public void testWriteWithBufferOffset() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - final Path testFilePath = getUniquePath(methodName.getMethodName()); + final Path testFilePath = path(methodName.getMethodName()); final byte[] b = new byte[1024 * 1000]; new Random().nextBytes(b); @@ -151,7 +151,7 @@ public void testWriteWithBufferOffset() throws Exception { @Test public void testReadWriteHeavyBytesToFileWithSmallerChunks() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - final Path testFilePath = getUniquePath(methodName.getMethodName()); + final Path testFilePath = path(methodName.getMethodName()); final byte[] writeBuffer = new byte[5 * 1000 * 1024]; new Random().nextBytes(writeBuffer); @@ -171,7 +171,7 @@ public void testReadWriteHeavyBytesToFileWithSmallerChunks() throws Exception { @Test public void testReadWithFileNotFoundException() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - final Path testFilePath = getUniquePath(methodName.getMethodName()); + final Path testFilePath = path(methodName.getMethodName()); testWriteOneByteToFile(testFilePath); FSDataInputStream inputStream = fs.open(testFilePath, TEST_DEFAULT_BUFFER_SIZE); @@ -185,7 +185,7 @@ public void 
testReadWithFileNotFoundException() throws Exception { @Test public void testWriteWithFileNotFoundException() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - final Path testFilePath = getUniquePath(methodName.getMethodName()); + final Path testFilePath = path(methodName.getMethodName()); FSDataOutputStream stream = fs.create(testFilePath); assertTrue(fs.exists(testFilePath)); @@ -202,7 +202,7 @@ public void testWriteWithFileNotFoundException() throws Exception { @Test public void testFlushWithFileNotFoundException() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - final Path testFilePath = getUniquePath(methodName.getMethodName()); + final Path testFilePath = path(methodName.getMethodName()); if (fs.getAbfsStore().isAppendBlobKey(fs.makeQualified(testFilePath).toString())) { return; } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFileStatus.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFileStatus.java index d18439f1aad9e..fc27b15896c6c 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFileStatus.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFileStatus.java @@ -57,7 +57,7 @@ public void testEnsureStatusWorksForRoot() throws Exception { public void testFileStatusPermissionsAndOwnerAndGroup() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); fs.getConf().set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, DEFAULT_UMASK_VALUE); - Path testFile = getUniquePath(TEST_FILE); + Path testFile = path(TEST_FILE); touch(testFile); validateStatus(fs, testFile, false); } @@ -94,7 +94,7 @@ private FileStatus validateStatus(final AzureBlobFileSystem fs, final Path name, public void testFolderStatusPermissionsAndOwnerAndGroup() throws Exception { final AzureBlobFileSystem fs = 
this.getFileSystem(); fs.getConf().set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, DEFAULT_UMASK_VALUE); - Path testFolder = getUniquePath(TEST_FOLDER); + Path testFolder = path(TEST_FOLDER); fs.mkdirs(testFolder); validateStatus(fs, testFolder, true); @@ -103,11 +103,10 @@ public void testFolderStatusPermissionsAndOwnerAndGroup() throws Exception { @Test public void testAbfsPathWithHost() throws IOException { AzureBlobFileSystem fs = this.getFileSystem(); - Path host = getUniquePath("mycluster"); - Path pathWithHost1 = new Path(String.format("abfs://%s/abfs/file1.txt", host)); + Path pathWithHost1 = new Path("abfs://mycluster/abfs/file1.txt"); Path pathwithouthost1 = new Path("/abfs/file1.txt"); - Path pathWithHost2 = new Path(String.format("abfs://%s/abfs/file2.txt", host)); + Path pathWithHost2 = new Path("abfs://mycluster/abfs/file2.txt"); Path pathwithouthost2 = new Path("/abfs/file2.txt"); // verify compatibility of this path format @@ -128,7 +127,7 @@ public void testAbfsPathWithHost() throws IOException { @Test public void testLastModifiedTime() throws IOException { AzureBlobFileSystem fs = this.getFileSystem(); - Path testFilePath = getUniquePath("childfile1.txt"); + Path testFilePath = path("childfile1.txt"); long createStartTime = System.currentTimeMillis(); long minCreateStartTime = (createStartTime / 1000) * 1000 - 1; // Dividing and multiplying by 1000 to make last 3 digits 0. 
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemListStatus.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemListStatus.java index 368e1517b4e56..8d1330b5ea7dd 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemListStatus.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemListStatus.java @@ -99,7 +99,7 @@ public Void call() throws Exception { @Test public void testListFileVsListDir() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - Path path = getUniquePath("/testFile"); + Path path = path("/testFile"); try(FSDataOutputStream ignored = fs.create(path)) { FileStatus[] testFiles = fs.listStatus(path); assertEquals("length of test files", 1, testFiles.length); @@ -111,7 +111,7 @@ public void testListFileVsListDir() throws Exception { @Test public void testListFileVsListDir2() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - Path testFolder = getUniquePath("/testFolder"); + Path testFolder = path("/testFolder"); fs.mkdirs(testFolder); fs.mkdirs(new Path(testFolder + "/testFolder2")); fs.mkdirs(new Path(testFolder + "/testFolder2/testFolder3")); @@ -137,7 +137,7 @@ public void testListNonExistentDir() throws Exception { @Test public void testListFiles() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - Path testDir = getUniquePath("/test"); + Path testDir = path("/test"); fs.mkdirs(testDir); FileStatus[] fileStatuses = fs.listStatus(new Path("/")); @@ -194,7 +194,7 @@ public void testMkdirTrailingPeriodDirName() throws IOException { final AzureBlobFileSystem fs = getFileSystem(); Path nontrailingPeriodDir = path("testTrailingDir/dir"); - Path trailingPeriodDir = path("testTrailingDir/dir."); + Path trailingPeriodDir = new Path("testMkdirTrailingDir/dir."); assertMkdirs(fs, nontrailingPeriodDir); 
@@ -213,8 +213,8 @@ public void testCreateTrailingPeriodFileName() throws IOException { boolean exceptionThrown = false; final AzureBlobFileSystem fs = getFileSystem(); - Path trailingPeriodFile = path("testTrailingDir/file."); - Path nontrailingPeriodFile = path("testTrailingDir/file"); + Path trailingPeriodFile = new Path("testTrailingDir/file."); + Path nontrailingPeriodFile = path("testCreateTrailingDir/file"); createFile(fs, nontrailingPeriodFile, false, new byte[0]); assertPathExists(fs, "Trailing period file does not exist", @@ -236,7 +236,7 @@ public void testRenameTrailingPeriodFile() throws IOException { final AzureBlobFileSystem fs = getFileSystem(); Path nonTrailingPeriodFile = path("testTrailingDir/file"); - Path trailingPeriodFile = path("testTrailingDir/file."); + Path trailingPeriodFile = new Path("testRenameTrailingDir/file."); createFile(fs, nonTrailingPeriodFile, false, new byte[0]); try { diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemMkDir.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemMkDir.java index e0b500e96fede..1d898ebd355b4 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemMkDir.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemMkDir.java @@ -49,7 +49,7 @@ public void testCreateDirWithExistingDir() throws Exception { DEFAULT_FS_AZURE_ENABLE_MKDIR_OVERWRITE || !getIsNamespaceEnabled( getFileSystem())); final AzureBlobFileSystem fs = getFileSystem(); - Path path = getUniquePath("testFolder"); + Path path = path("testFolder"); assertMkdirs(fs, path); assertMkdirs(fs, path); } @@ -64,7 +64,7 @@ public void testMkdirExistingDirOverwriteFalse() throws Exception { Configuration config = new Configuration(this.getRawConfiguration()); config.set(FS_AZURE_ENABLE_MKDIR_OVERWRITE, Boolean.toString(false)); AzureBlobFileSystem 
fs = getFileSystem(config); - Path path = getUniquePath("testFolder"); + Path path = path("testFolder"); assertMkdirs(fs, path); //checks that mkdirs returns true long timeCreated = fs.getFileStatus(path).getModificationTime(); assertMkdirs(fs, path); //call to existing dir should return success @@ -78,7 +78,7 @@ public void createDirWithExistingFilename() throws Exception { DEFAULT_FS_AZURE_ENABLE_MKDIR_OVERWRITE && getIsNamespaceEnabled( getFileSystem())); final AzureBlobFileSystem fs = getFileSystem(); - Path path = getUniquePath("testFilePath"); + Path path = path("testFilePath"); fs.create(path); assertTrue(fs.getFileStatus(path).isFile()); intercept(FileAlreadyExistsException.class, () -> fs.mkdirs(path)); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemOauth.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemOauth.java index 8993953d49405..7f413934f1432 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemOauth.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemOauth.java @@ -72,8 +72,8 @@ public void testBlobDataContributor() throws Exception { String secret = this.getConfiguration().get(TestConfigurationKeys.FS_AZURE_BLOB_DATA_CONTRIBUTOR_CLIENT_SECRET); Assume.assumeTrue("Contributor client secret not provided", secret != null); - Path existedFilePath = getUniquePath(EXISTED_FILE_PATH); - Path existedFolderPath = getUniquePath(EXISTED_FOLDER_PATH); + Path existedFilePath = path(EXISTED_FILE_PATH); + Path existedFolderPath = path(EXISTED_FOLDER_PATH); prepareFiles(existedFilePath, existedFolderPath); final AzureBlobFileSystem fs = getBlobConributor(); @@ -127,8 +127,8 @@ public void testBlobDataReader() throws Exception { String secret = this.getConfiguration().get(TestConfigurationKeys.FS_AZURE_BLOB_DATA_READER_CLIENT_SECRET); 
Assume.assumeTrue("Reader client secret not provided", secret != null); - Path existedFilePath = getUniquePath(EXISTED_FILE_PATH); - Path existedFolderPath = getUniquePath(EXISTED_FOLDER_PATH); + Path existedFilePath = path(EXISTED_FILE_PATH); + Path existedFolderPath = path(EXISTED_FOLDER_PATH); prepareFiles(existedFilePath, existedFolderPath); final AzureBlobFileSystem fs = getBlobReader(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRandomRead.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRandomRead.java index fa8786953a230..c1f0e06439950 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRandomRead.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRandomRead.java @@ -86,7 +86,7 @@ public ITestAzureBlobFileSystemRandomRead() throws Exception { @Test public void testBasicRead() throws Exception { - Path testPath = getUniquePath(TEST_FILE_PREFIX + "_testBasicRead"); + Path testPath = path(TEST_FILE_PREFIX + "_testBasicRead"); assumeHugeFileExists(testPath); try (FSDataInputStream inputStream = this.getFileSystem().open(testPath)) { @@ -115,7 +115,7 @@ public void testBasicRead() throws Exception { public void testRandomRead() throws Exception { Assume.assumeFalse("This test does not support namespace enabled account", getIsNamespaceEnabled(getFileSystem())); - Path testPath = getUniquePath(TEST_FILE_PREFIX + "_testRandomRead"); + Path testPath = path(TEST_FILE_PREFIX + "_testRandomRead"); assumeHugeFileExists(testPath); try ( @@ -174,7 +174,7 @@ public void testRandomRead() throws Exception { */ @Test public void testSeekToNewSource() throws Exception { - Path testPath = getUniquePath(TEST_FILE_PREFIX + "_testSeekToNewSource"); + Path testPath = path(TEST_FILE_PREFIX + "_testSeekToNewSource"); assumeHugeFileExists(testPath); try 
(FSDataInputStream inputStream = this.getFileSystem().open(testPath)) { @@ -189,7 +189,7 @@ public void testSeekToNewSource() throws Exception { */ @Test public void testSkipBounds() throws Exception { - Path testPath = getUniquePath(TEST_FILE_PREFIX + "_testSkipBounds"); + Path testPath = path(TEST_FILE_PREFIX + "_testSkipBounds"); long testFileLength = assumeHugeFileExists(testPath); try (FSDataInputStream inputStream = this.getFileSystem().open(testPath)) { @@ -230,7 +230,7 @@ public Long call() throws Exception { */ @Test public void testValidateSeekBounds() throws Exception { - Path testPath = getUniquePath(TEST_FILE_PREFIX + "_testValidateSeekBounds"); + Path testPath = path(TEST_FILE_PREFIX + "_testValidateSeekBounds"); long testFileLength = assumeHugeFileExists(testPath); try (FSDataInputStream inputStream = this.getFileSystem().open(testPath)) { @@ -281,7 +281,7 @@ public FSDataInputStream call() throws Exception { */ @Test public void testSeekAndAvailableAndPosition() throws Exception { - Path testPath = getUniquePath(TEST_FILE_PREFIX + "_testSeekAndAvailableAndPosition"); + Path testPath = path(TEST_FILE_PREFIX + "_testSeekAndAvailableAndPosition"); long testFileLength = assumeHugeFileExists(testPath); try (FSDataInputStream inputStream = this.getFileSystem().open(testPath)) { @@ -347,7 +347,7 @@ public void testSeekAndAvailableAndPosition() throws Exception { */ @Test public void testSkipAndAvailableAndPosition() throws Exception { - Path testPath = getUniquePath(TEST_FILE_PREFIX + "_testSkipAndAvailableAndPosition"); + Path testPath = path(TEST_FILE_PREFIX + "_testSkipAndAvailableAndPosition"); long testFileLength = assumeHugeFileExists(testPath); try (FSDataInputStream inputStream = this.getFileSystem().open(testPath)) { @@ -413,7 +413,7 @@ public void testSkipAndAvailableAndPosition() throws Exception { @Test public void testSequentialReadAfterReverseSeekPerformance() throws Exception { - Path testPath = getUniquePath( + Path testPath = path( 
TEST_FILE_PREFIX + "_testSequentialReadAfterReverseSeekPerformance"); assumeHugeFileExists(testPath); final int maxAttempts = 10; @@ -447,7 +447,7 @@ public void testSequentialReadAfterReverseSeekPerformance() public void testRandomReadPerformance() throws Exception { Assume.assumeFalse("This test does not support namespace enabled account", getIsNamespaceEnabled(getFileSystem())); - Path testPath = getUniquePath(TEST_FILE_PREFIX + "_testRandomReadPerformance"); + Path testPath = path(TEST_FILE_PREFIX + "_testRandomReadPerformance"); assumeHugeFileExists(testPath); final AzureBlobFileSystem abFs = this.getFileSystem(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java index bd7c8c7ec836f..b140dc9f3a509 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java @@ -95,13 +95,13 @@ public void testRenameWithPreExistingDestination() throws Exception { @Test public void testRenameFileUnderDir() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - Path sourceDir = getUniquePath("/testSrc"); + Path sourceDir = path("/testSrc"); assertMkdirs(fs, sourceDir); String filename = "file1"; Path file1 = new Path(sourceDir, filename); touch(file1); - Path destDir = getUniquePath("/testDst"); + Path destDir = path("/testDst"); assertRenameOutcome(fs, sourceDir, destDir, true); FileStatus[] fileStatus = fs.listStatus(destDir); assertNotNull("Null file status", fileStatus); @@ -113,7 +113,7 @@ public void testRenameFileUnderDir() throws Exception { @Test public void testRenameDirectory() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - Path testDir = getUniquePath("testDir"); + Path testDir = 
path("testDir"); fs.mkdirs(testDir); Path test1 = new Path(testDir + "/test1"); fs.mkdirs(test1); @@ -131,7 +131,7 @@ public void testRenameFirstLevelDirectory() throws Exception { final List> tasks = new ArrayList<>(); ExecutorService es = Executors.newFixedThreadPool(10); - Path source = getUniquePath("/test"); + Path source = path("/test"); for (int i = 0; i < 1000; i++) { final Path fileName = new Path(source + "/" + i); Callable callable = new Callable() { @@ -150,7 +150,7 @@ public Void call() throws Exception { } es.shutdownNow(); - Path dest = getUniquePath("/renamedDir"); + Path dest = path("/renamedDir"); assertRenameOutcome(fs, source, dest, true); FileStatus[] files = fs.listStatus(dest); @@ -174,7 +174,7 @@ public void testRenameRoot() throws Exception { @Test public void testPosixRenameDirectory() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - Path testDir2 = getUniquePath("testDir2"); + Path testDir2 = path("testDir2"); fs.mkdirs(new Path(testDir2 + "/test1/test2/test3")); fs.mkdirs(new Path(testDir2 + "/test4")); Assert.assertTrue(fs.rename(new Path(testDir2 + "/test1/test2/test3"), new Path(testDir2 + "/test4"))); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRenameUnicode.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRenameUnicode.java index 1160f038189aa..f913da7b15ed0 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRenameUnicode.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRenameUnicode.java @@ -76,7 +76,7 @@ public ITestAzureBlobFileSystemRenameUnicode() throws Exception { @Test public void testRenameFileUsingUnicode() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - Path folderPath1 = getUniquePath(srcDir); + Path folderPath1 = path(srcDir); assertMkdirs(fs, 
folderPath1); assertIsDirectory(fs, folderPath1); Path filePath = new Path(folderPath1 + "/" + filename); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestCustomerProvidedKey.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestCustomerProvidedKey.java index 5970f2e7ae779..59056c1ffe8a9 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestCustomerProvidedKey.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestCustomerProvidedKey.java @@ -108,7 +108,7 @@ public ITestCustomerProvidedKey() throws Exception { @Test public void testReadWithCPK() throws Exception { final AzureBlobFileSystem fs = getAbfs(true); - String fileName = getUniquePath("/" + methodName.getMethodName()).toString(); + String fileName = path("/" + methodName.getMethodName()).toString(); createFileAndGetContent(fs, fileName, FILE_SIZE); AbfsClient abfsClient = fs.getAbfsClient(); @@ -158,7 +158,7 @@ public void testReadWithCPK() throws Exception { @Test public void testReadWithoutCPK() throws Exception { final AzureBlobFileSystem fs = getAbfs(false); - String fileName = getUniquePath("/" + methodName.getMethodName()).toString(); + String fileName = path("/" + methodName.getMethodName()).toString(); createFileAndGetContent(fs, fileName, FILE_SIZE); AbfsClient abfsClient = fs.getAbfsClient(); @@ -197,7 +197,7 @@ public void testReadWithoutCPK() throws Exception { @Test public void testAppendWithCPK() throws Exception { final AzureBlobFileSystem fs = getAbfs(true); - final String fileName = getUniquePath("/" + methodName.getMethodName()).toString(); + final String fileName = path("/" + methodName.getMethodName()).toString(); createFileAndGetContent(fs, fileName, FILE_SIZE); // Trying to append with correct CPK headers @@ -242,7 +242,7 @@ public void testAppendWithCPK() throws Exception { @Test public void testAppendWithoutCPK() throws Exception { final 
AzureBlobFileSystem fs = getAbfs(false); - final String fileName = getUniquePath("/" + methodName.getMethodName()).toString(); + final String fileName = path("/" + methodName.getMethodName()).toString(); createFileAndGetContent(fs, fileName, FILE_SIZE); // Trying to append without CPK headers @@ -278,7 +278,7 @@ public void testAppendWithoutCPK() throws Exception { @Test public void testSetGetXAttr() throws Exception { final AzureBlobFileSystem fs = getAbfs(true); - final String fileName = getUniquePath(methodName.getMethodName()).toString(); + final String fileName = path(methodName.getMethodName()).toString(); createFileAndGetContent(fs, fileName, FILE_SIZE); String valSent = "testValue"; @@ -411,7 +411,7 @@ public void testListPathWithoutCPK() throws Exception { private void testListPath(final boolean isWithCPK) throws Exception { final AzureBlobFileSystem fs = getAbfs(isWithCPK); - final Path testPath = getUniquePath("/" + methodName.getMethodName()); + final Path testPath = path("/" + methodName.getMethodName()); String testDirName = testPath.toString(); fs.mkdirs(testPath); createFileAndGetContent(fs, testDirName + "/aaa", FILE_SIZE); @@ -471,7 +471,7 @@ public void testCreatePathWithoutCPK() throws Exception { private void testCreatePath(final boolean isWithCPK) throws Exception { final AzureBlobFileSystem fs = getAbfs(isWithCPK); - final String testFileName = getUniquePath("/" + methodName.getMethodName()) + final String testFileName = path("/" + methodName.getMethodName()) .toString(); createFileAndGetContent(fs, testFileName, FILE_SIZE); @@ -515,7 +515,7 @@ public void testRenamePathWithoutCPK() throws Exception { private void testRenamePath(final boolean isWithCPK) throws Exception { final AzureBlobFileSystem fs = getAbfs(isWithCPK); - final String testFileName = getUniquePath("/" + methodName.getMethodName()) + final String testFileName = path("/" + methodName.getMethodName()) .toString(); createFileAndGetContent(fs, testFileName, FILE_SIZE); @@ -551,7 
+551,7 @@ public void testFlushWithoutCPK() throws Exception { private void testFlush(final boolean isWithCPK) throws Exception { final AzureBlobFileSystem fs = getAbfs(isWithCPK); - final String testFileName = getUniquePath("/" + methodName.getMethodName()) + final String testFileName = path("/" + methodName.getMethodName()) .toString(); fs.create(new Path(testFileName)); AbfsClient abfsClient = fs.getAbfsClient(); @@ -611,7 +611,7 @@ public void testSetPathPropertiesWithoutCPK() throws Exception { private void testSetPathProperties(final boolean isWithCPK) throws Exception { final AzureBlobFileSystem fs = getAbfs(isWithCPK); - final String testFileName = getUniquePath("/" + methodName.getMethodName()) + final String testFileName = path("/" + methodName.getMethodName()) .toString(); createFileAndGetContent(fs, testFileName, FILE_SIZE); @@ -642,7 +642,7 @@ public void testGetPathStatusFileWithoutCPK() throws Exception { private void testGetPathStatusFile(final boolean isWithCPK) throws Exception { final AzureBlobFileSystem fs = getAbfs(isWithCPK); - final String testFileName = getUniquePath("/" + methodName.getMethodName()) + final String testFileName = path("/" + methodName.getMethodName()) .toString(); createFileAndGetContent(fs, testFileName, FILE_SIZE); @@ -680,7 +680,7 @@ public void testDeletePathWithoutCPK() throws Exception { private void testDeletePath(final boolean isWithCPK) throws Exception { final AzureBlobFileSystem fs = getAbfs(isWithCPK); - final String testFileName = getUniquePath("/" + methodName.getMethodName()) + final String testFileName = path("/" + methodName.getMethodName()) .toString(); createFileAndGetContent(fs, testFileName, FILE_SIZE); @@ -711,7 +711,7 @@ public void testSetPermissionWithoutCPK() throws Exception { private void testSetPermission(final boolean isWithCPK) throws Exception { final AzureBlobFileSystem fs = getAbfs(isWithCPK); - final String testFileName = getUniquePath("/" + methodName.getMethodName()) + final String 
testFileName = path("/" + methodName.getMethodName()) .toString(); Assume.assumeTrue(fs.getIsNamespaceEnabled(getTestTracingContext(fs, false))); createFileAndGetContent(fs, testFileName, FILE_SIZE); @@ -737,7 +737,7 @@ public void testSetAclWithoutCPK() throws Exception { private void testSetAcl(final boolean isWithCPK) throws Exception { final AzureBlobFileSystem fs = getAbfs(isWithCPK); - final String testFileName = getUniquePath("/" + methodName.getMethodName()) + final String testFileName = path("/" + methodName.getMethodName()) .toString(); TracingContext tracingContext = getTestTracingContext(fs, false); Assume.assumeTrue(fs.getIsNamespaceEnabled(tracingContext)); @@ -767,7 +767,7 @@ public void testGetAclWithoutCPK() throws Exception { private void testGetAcl(final boolean isWithCPK) throws Exception { final AzureBlobFileSystem fs = getAbfs(isWithCPK); - final String testFileName = getUniquePath("/" + methodName.getMethodName()) + final String testFileName = path("/" + methodName.getMethodName()) .toString(); TracingContext tracingContext = getTestTracingContext(fs, false); Assume.assumeTrue(fs.getIsNamespaceEnabled(tracingContext)); @@ -798,7 +798,7 @@ private void testCheckAccess(final boolean isWithCPK) throws Exception { getAuthType() == AuthType.OAuth); final AzureBlobFileSystem fs = getAbfs(isWithCPK); - final String testFileName = getUniquePath("/" + methodName.getMethodName()) + final String testFileName = path("/" + methodName.getMethodName()) .toString(); fs.create(new Path(testFileName)); AbfsClient abfsClient = fs.getAbfsClient(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestFileSystemProperties.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestFileSystemProperties.java index 565f828052aca..0ccef2e6ccb34 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestFileSystemProperties.java +++ 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestFileSystemProperties.java @@ -40,7 +40,7 @@ public ITestFileSystemProperties() throws Exception { @Test public void testReadWriteBytesToFileAndEnsureThreadPoolCleanup() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - Path testPath = getUniquePath(TEST_PATH); + Path testPath = path(TEST_PATH); try(FSDataOutputStream stream = fs.create(testPath)) { stream.write(TEST_DATA); } @@ -57,7 +57,7 @@ public void testReadWriteBytesToFileAndEnsureThreadPoolCleanup() throws Exceptio @Test public void testWriteOneByteToFileAndEnsureThreadPoolCleanup() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - Path testPath = getUniquePath(TEST_PATH); + Path testPath = path(TEST_PATH); try(FSDataOutputStream stream = fs.create(testPath)) { stream.write(TEST_DATA); } @@ -85,7 +85,7 @@ public void testBase64PathProperties() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); final Hashtable properties = new Hashtable<>(); properties.put("key", "{ value: valueTest }"); - Path testPath = getUniquePath(TEST_PATH); + Path testPath = path(TEST_PATH); touch(testPath); TracingContext tracingContext = getTestTracingContext(fs, true); fs.getAbfsStore().setPathProperties(testPath, properties, tracingContext); @@ -113,7 +113,7 @@ public void testBase64InvalidPathProperties() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); final Hashtable properties = new Hashtable<>(); properties.put("key", "{ value: valueTestå…© }"); - Path testPath = getUniquePath(TEST_PATH); + Path testPath = path(TEST_PATH); touch(testPath); TracingContext tracingContext = getTestTracingContext(fs, true); fs.getAbfsStore().setPathProperties(testPath, properties, tracingContext); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestWasbAbfsCompatibility.java 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestWasbAbfsCompatibility.java index cc88ac704e573..0534cdda99fc8 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestWasbAbfsCompatibility.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestWasbAbfsCompatibility.java @@ -62,7 +62,7 @@ public void testListFileStatus() throws Exception { NativeAzureFileSystem wasb = getWasbFileSystem(); - Path testFiles = getUniquePath("/testfiles"); + Path testFiles = path("/testfiles"); Path path1 = new Path(testFiles + "/~12/!008/3/abFsTestfile"); try(FSDataOutputStream abfsStream = fs.create(path1, true)) { abfsStream.write(ABFS_TEST_CONTEXT.getBytes()); @@ -98,7 +98,7 @@ public void testReadFile() throws Exception { NativeAzureFileSystem wasb = getWasbFileSystem(); - Path testFile = getUniquePath("/testReadFile"); + Path testFile = path("/testReadFile"); for (int i = 0; i< 4; i++) { Path path = new Path(testFile + "/~12/!008/testfile" + i); final FileSystem createFs = createFileWithAbfs[i] ? 
abfs : wasb; @@ -139,7 +139,7 @@ public void testDir() throws Exception { NativeAzureFileSystem wasb = getWasbFileSystem(); - Path testDir = getUniquePath("/testDir"); + Path testDir = path("/testDir"); for (int i = 0; i < 4; i++) { Path path = new Path(testDir + "/t" + i); //create @@ -175,7 +175,7 @@ public void testSetWorkingDirectory() throws Exception { NativeAzureFileSystem wasb = getWasbFileSystem(); - Path d1 = getUniquePath("/d1"); + Path d1 = path("/d1"); Path d1d4 = new Path(d1 + "/d2/d3/d4"); assertMkdirs(abfs, d1d4); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsOutputStream.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsOutputStream.java index bdd2178bc1664..5b1a014ee7ade 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsOutputStream.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsOutputStream.java @@ -41,7 +41,7 @@ public ITestAbfsOutputStream() throws Exception { public void testMaxRequestsAndQueueCapacityDefaults() throws Exception { Configuration conf = getRawConfiguration(); final AzureBlobFileSystem fs = getFileSystem(conf); - try (FSDataOutputStream out = fs.create(getUniquePath(TEST_FILE_PATH))) { + try (FSDataOutputStream out = fs.create(path(TEST_FILE_PATH))) { AbfsOutputStream stream = (AbfsOutputStream) out.getWrappedStream(); int maxConcurrentRequests @@ -70,7 +70,7 @@ public void testMaxRequestsAndQueueCapacity() throws Exception { conf.set(ConfigurationKeys.AZURE_WRITE_MAX_REQUESTS_TO_QUEUE, "" + maxRequestsToQueue); final AzureBlobFileSystem fs = getFileSystem(conf); - FSDataOutputStream out = fs.create(getUniquePath(TEST_FILE_PATH)); + FSDataOutputStream out = fs.create(path(TEST_FILE_PATH)); AbfsOutputStream stream = (AbfsOutputStream) out.getWrappedStream(); if (stream.isAppendBlobStream()) { diff --git 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsInputStream.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsInputStream.java index 1b5459be6d75d..b37b320a70a44 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsInputStream.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsInputStream.java @@ -583,7 +583,7 @@ public void testDefaultReadaheadQueueDepth() throws Exception { Configuration config = getRawConfiguration(); config.unset(FS_AZURE_READ_AHEAD_QUEUE_DEPTH); AzureBlobFileSystem fs = getFileSystem(config); - Path testFile = getUniquePath("/testFile"); + Path testFile = path("/testFile"); fs.create(testFile); FSDataInputStream in = fs.open(testFile); Assertions.assertThat( @@ -646,7 +646,7 @@ public AbfsInputStream testReadAheadConfigs(int readRequestSize, readAheadBlockSize = readRequestSize; } - Path testPath = getUniquePath("/testReadAheadConfigs"); + Path testPath = path("/testReadAheadConfigs"); final AzureBlobFileSystem fs = createTestFile(testPath, ALWAYS_READ_BUFFER_SIZE_TEST_FILE_SIZE, config); byte[] byteBuffer = new byte[ONE_MB]; From d02c0cdd03d78f85079b716aa9b63e9879e989f0 Mon Sep 17 00:00:00 2001 From: sumangala Date: Wed, 14 Jul 2021 11:30:38 +0530 Subject: [PATCH 07/11] assert --- .../azurebfs/AbstractAbfsIntegrationTest.java | 5 ++-- .../fs/azurebfs/ITestAbfsStatistics.java | 24 +++++++-------- ...ITestAzureBlobFileSystemDelegationSAS.java | 30 ++++++++++--------- .../azurebfs/ITestAzureBlobFileSystemE2E.java | 12 ++++---- .../ITestAzureBlobFileSystemFileStatus.java | 6 ++-- .../ITestAzureBlobFileSystemOauth.java | 18 ++++++----- .../ITestAzureBlobFileSystemRename.java | 12 ++++---- .../azurebfs/ITestAzureBlobFilesystemAcl.java | 5 ++-- 8 files changed, 62 insertions(+), 50 deletions(-) diff --git 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java index 238b6d39f586f..093a7d4395645 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java @@ -272,7 +272,8 @@ protected void createFilesystemForSASTests() throws Exception { // so first create temporary instance of the filesystem using SharedKey // then re-use the filesystem it creates with SAS auth instead of SharedKey. AzureBlobFileSystem tempFs = (AzureBlobFileSystem) FileSystem.newInstance(rawConfig); - Assert.assertTrue(tempFs.exists(new Path("/"))); + ContractTestUtils.assertPathExists(tempFs, "This path should exist", + new Path("/")); abfsConfig.set(FS_AZURE_ACCOUNT_AUTH_TYPE_PROPERTY_NAME, AuthType.SAS.name()); usingFilesystemForSASTests = true; } @@ -446,7 +447,7 @@ protected Path path(String filepath) throws IOException { } /** - * Generate a unique path using the given filepath + * Generate a unique path using the given filepath. 
* @param filepath path string * @return unique path created from filepath and a GUID */ diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStatistics.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStatistics.java index e6b572de9717c..596100136e7c4 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStatistics.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStatistics.java @@ -31,6 +31,8 @@ import static org.apache.hadoop.fs.CommonConfigurationKeys.IOSTATISTICS_LOGGING_LEVEL; import static org.apache.hadoop.fs.CommonConfigurationKeys.IOSTATISTICS_LOGGING_LEVEL_INFO; +import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathExists; +import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathDoesNotExist; /** * Tests AzureBlobFileSystem Statistics. @@ -211,12 +213,12 @@ public void testOpenAppendRenameExists() throws IOException { assertAbfsStatistics(AbfsStatistic.CALL_RENAME, 1, metricMap); //Testing if file exists at path. - assertTrue(String.format("File with name %s should exist", - destCreateFilePath), - fs.exists(destCreateFilePath)); - assertFalse(String.format("File with name %s should not exist", - createFilePath), - fs.exists(createFilePath)); + assertPathExists(fs, + String.format("File with name %s should exist", destCreateFilePath), + destCreateFilePath); + assertPathDoesNotExist(fs, + String.format("File with name %s should not exist", createFilePath), + createFilePath); metricMap = fs.getInstrumentationMap(); //Testing exists() calls. @@ -244,12 +246,10 @@ public void testOpenAppendRenameExists() throws IOException { assertTrue(fs.rename(createFilePath, destCreateFilePath)); //check if first name is existing and 2nd is not existing. 
- assertTrue(String.format("File with name %s should exist", - destCreateFilePath), - fs.exists(destCreateFilePath)); - assertFalse(String.format("File with name %s should not exist", - createFilePath), - fs.exists(createFilePath)); + assertPathExists(fs, String.format("File with name %s should exist", + destCreateFilePath), destCreateFilePath); + assertPathDoesNotExist(fs, String.format( + "File with name %s should not exist", createFilePath), createFilePath); } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelegationSAS.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelegationSAS.java index dd27efca5b5c3..bc74703496543 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelegationSAS.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelegationSAS.java @@ -53,6 +53,8 @@ import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_SAS_TOKEN_PROVIDER_TYPE; import static org.apache.hadoop.fs.azurebfs.contracts.services.AzureServiceErrorCode.AUTHORIZATION_PERMISSION_MISS_MATCH; import static org.apache.hadoop.fs.azurebfs.utils.AclTestHelpers.aclEntry; +import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathDoesNotExist; +import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathExists; import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS; import static org.apache.hadoop.fs.permission.AclEntryScope.DEFAULT; import static org.apache.hadoop.fs.permission.AclEntryType.GROUP; @@ -223,15 +225,15 @@ public void testRename() throws Exception { stream.writeBytes("hello"); } - assertFalse(fs.exists(destinationPath)); + assertPathDoesNotExist(fs, "This path should not exist", destinationPath); fs.rename(sourcePath, destinationPath); - assertFalse(fs.exists(sourcePath)); - 
assertTrue(fs.exists(destinationPath)); + assertPathDoesNotExist(fs, "This path should not exist", sourcePath); + assertPathExists(fs, "This path should exist", destinationPath); - assertFalse(fs.exists(destinationDir)); + assertPathDoesNotExist(fs, "This path should not exist", destinationDir); fs.rename(sourceDir, destinationDir); - assertFalse(fs.exists(sourceDir)); - assertTrue(fs.exists(destinationDir)); + assertPathDoesNotExist(fs, "This path should not exist", sourceDir); + assertPathExists(fs, "This path should exist", destinationDir); } @Test @@ -246,13 +248,13 @@ public void testDelete() throws Exception { stream.writeBytes("hello"); } - assertTrue(fs.exists(filePath)); + assertPathExists(fs, "This path should exist", filePath); fs.delete(filePath, false); - assertFalse(fs.exists(filePath)); + assertPathDoesNotExist(fs, "This path should not exist", filePath); - assertTrue(fs.exists(dirPath)); + assertPathExists(fs, "This path should exist", dirPath); fs.delete(dirPath, false); - assertFalse(fs.exists(dirPath)); + assertPathDoesNotExist(fs, "This path should not exist", dirPath); } @Test @@ -267,11 +269,11 @@ public void testDeleteRecursive() throws Exception { stream.writeBytes("hello"); } - assertTrue(fs.exists(dirPath)); - assertTrue(fs.exists(filePath)); + assertPathExists(fs, "This path should exist", dirPath); + assertPathExists(fs, "This path should exist", filePath); fs.delete(dirPath, true); - assertFalse(fs.exists(filePath)); - assertFalse(fs.exists(dirPath)); + assertPathDoesNotExist(fs, "This path should not exist", filePath); + assertPathDoesNotExist(fs, "This path should not exist", dirPath); } @Test diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2E.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2E.java index 4c1653a8c13a8..99cce35f1f8cc 100644 --- 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2E.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2E.java @@ -33,6 +33,8 @@ import org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys; import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.AZURE_TOLERATE_CONCURRENT_APPEND; +import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathDoesNotExist; +import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathExists; import static org.apache.hadoop.test.LambdaTestUtils.intercept; /** @@ -176,7 +178,7 @@ public void testReadWithFileNotFoundException() throws Exception { FSDataInputStream inputStream = fs.open(testFilePath, TEST_DEFAULT_BUFFER_SIZE); fs.delete(testFilePath, true); - assertFalse(fs.exists(testFilePath)); + assertPathDoesNotExist(fs, "This path should not exist", testFilePath); intercept(FileNotFoundException.class, () -> inputStream.read(new byte[1])); @@ -188,11 +190,11 @@ public void testWriteWithFileNotFoundException() throws Exception { final Path testFilePath = path(methodName.getMethodName()); FSDataOutputStream stream = fs.create(testFilePath); - assertTrue(fs.exists(testFilePath)); + assertPathExists(fs, "Path should exist", testFilePath); stream.write(TEST_BYTE); fs.delete(testFilePath, true); - assertFalse(fs.exists(testFilePath)); + assertPathDoesNotExist(fs, "This path should not exist", testFilePath); // trigger append call intercept(FileNotFoundException.class, @@ -208,10 +210,10 @@ public void testFlushWithFileNotFoundException() throws Exception { } FSDataOutputStream stream = fs.create(testFilePath); - assertTrue(fs.exists(testFilePath)); + assertPathExists(fs, "This path should exist", testFilePath); fs.delete(testFilePath, true); - assertFalse(fs.exists(testFilePath)); + assertPathDoesNotExist(fs, "This path should not exist", testFilePath); intercept(FileNotFoundException.class, () 
-> stream.close()); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFileStatus.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFileStatus.java index fc27b15896c6c..4a8528ed7b44f 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFileStatus.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFileStatus.java @@ -27,6 +27,8 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; +import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathExists; + /** * Test FileStatus. */ @@ -111,10 +113,10 @@ public void testAbfsPathWithHost() throws IOException { // verify compatibility of this path format fs.create(pathWithHost1); - assertTrue(fs.exists(pathwithouthost1)); + assertPathExists(fs, "This path should exist", pathwithouthost1); fs.create(pathwithouthost2); - assertTrue(fs.exists(pathWithHost2)); + assertPathExists(fs, "This path should exist", pathWithHost2); // verify get FileStatus fileStatus1 = fs.getFileStatus(pathWithHost1); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemOauth.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemOauth.java index 7f413934f1432..33ad812ada804 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemOauth.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemOauth.java @@ -45,6 +45,8 @@ import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_BLOB_DATA_CONTRIBUTOR_CLIENT_SECRET; import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_BLOB_DATA_READER_CLIENT_ID; import static 
org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_BLOB_DATA_READER_CLIENT_SECRET; +import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathDoesNotExist; +import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathExists; /** * Test Azure Oauth with Blob Data contributor role and Blob Data Reader role. @@ -82,21 +84,21 @@ public void testBlobDataContributor() throws Exception { try(FSDataOutputStream stream = fs.create(FILE_PATH)) { stream.write(0); } - assertTrue(fs.exists(FILE_PATH)); + assertPathExists(fs, "This path should exist", FILE_PATH); FileStatus fileStatus = fs.getFileStatus(FILE_PATH); assertEquals(1, fileStatus.getLen()); // delete file assertTrue(fs.delete(FILE_PATH, true)); - assertFalse(fs.exists(FILE_PATH)); + assertPathDoesNotExist(fs, "This path should not exist", FILE_PATH); // Verify Blob Data Contributor has full access to existed folder, file // READ FOLDER - assertTrue(fs.exists(existedFolderPath)); + assertPathExists(fs, "This path should exist", existedFolderPath); //DELETE FOLDER fs.delete(existedFolderPath, true); - assertFalse(fs.exists(existedFolderPath)); + assertPathDoesNotExist(fs, "This path should not exist", existedFolderPath); // READ FILE try (FSDataInputStream stream = fs.open(existedFilePath)) { @@ -114,7 +116,7 @@ public void testBlobDataContributor() throws Exception { // REMOVE FILE fs.delete(existedFilePath, true); - assertFalse(fs.exists(existedFilePath)); + assertPathDoesNotExist(fs, "This path should not exist", existedFilePath); } /* @@ -138,7 +140,7 @@ public void testBlobDataReader() throws Exception { // TEST READ FS Map properties = abfsStore.getFilesystemProperties(tracingContext); // TEST READ FOLDER - assertTrue(fs.exists(existedFolderPath)); + assertPathExists(fs, "This path should exist", existedFolderPath); // TEST DELETE FOLDER try { @@ -170,9 +172,9 @@ private void prepareFiles(Path existedFilePath, Path existedFolderPath) throws I // Blob data contributor 
and Blob data reader final AzureBlobFileSystem fs = this.getFileSystem(); fs.create(existedFilePath); - assertTrue(fs.exists(existedFilePath)); + assertPathExists(fs, "This path should exist", existedFilePath); fs.mkdirs(existedFolderPath); - assertTrue(fs.exists(existedFolderPath)); + assertPathExists(fs, "This path should exist", existedFolderPath); } private AzureBlobFileSystem getBlobConributor() throws Exception { diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java index b140dc9f3a509..8a0adf2443454 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java @@ -51,6 +51,7 @@ import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.DEFAULT_CLOCK_SKEW_WITH_SERVER_IN_MS; import static org.apache.hadoop.fs.contract.ContractTestUtils.assertMkdirs; import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathDoesNotExist; +import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathExists; import static org.apache.hadoop.fs.contract.ContractTestUtils.assertRenameOutcome; import static org.apache.hadoop.fs.contract.ContractTestUtils.assertIsFile; @@ -178,11 +179,12 @@ public void testPosixRenameDirectory() throws Exception { fs.mkdirs(new Path(testDir2 + "/test1/test2/test3")); fs.mkdirs(new Path(testDir2 + "/test4")); Assert.assertTrue(fs.rename(new Path(testDir2 + "/test1/test2/test3"), new Path(testDir2 + "/test4"))); - assertTrue(fs.exists(testDir2)); - assertTrue(fs.exists(new Path(testDir2 + "/test1/test2"))); - assertTrue(fs.exists(new Path(testDir2 + "/test4"))); - assertTrue(fs.exists(new Path(testDir2 + "/test4/test3"))); - assertFalse(fs.exists(new Path(testDir2 + 
"/test1/test2/test3"))); + assertPathExists(fs, "This path should exist", testDir2); + assertPathExists(fs, "This path should exist", new Path(testDir2 + "/test1/test2")); + assertPathExists(fs, "This path should exist",new Path(testDir2 + "/test4")); + assertPathExists(fs, "This path should exist",new Path(testDir2 + "/test4/test3")); + assertPathDoesNotExist(fs, "This path should not exist", new Path(testDir2 + + "/test1/test2/test3")); } @Test diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFilesystemAcl.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFilesystemAcl.java index 03c3b49d6b253..d55f0ea4f6272 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFilesystemAcl.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFilesystemAcl.java @@ -40,6 +40,7 @@ import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; +import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathExists; import static org.junit.Assume.assumeTrue; import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS; @@ -1297,7 +1298,7 @@ public void testSetOwnerForNonNamespaceEnabledAccount() throws Exception { final Path filePath = new Path(methodName.getMethodName()); fs.create(filePath); - assertTrue(fs.exists(filePath)); + assertPathExists(fs, "This path should exist", filePath); TracingHeaderValidator tracingHeaderValidator = new TracingHeaderValidator( conf.getClientCorrelationId(), fs.getFileSystemId(), @@ -1320,7 +1321,7 @@ public void testSetPermissionForNonNamespaceEnabledAccount() throws Exception { final Path filePath = new Path(methodName.getMethodName()); fs.create(filePath); - assertTrue(fs.exists(filePath)); + assertPathExists(fs, "This path should exist", filePath); FsPermission oldPermission = fs.getFileStatus(filePath).getPermission(); // 
default permission for non-namespace enabled account is "777" FsPermission newPermission = new FsPermission("557"); From 8322a19842e07fb76dfcc7ab85e0e2a4bacfe719 Mon Sep 17 00:00:00 2001 From: sumangala Date: Sun, 18 Jul 2021 09:21:39 +0530 Subject: [PATCH 08/11] checkstyle --- .../fs/azurebfs/AbstractAbfsIntegrationTest.java | 1 - .../fs/azurebfs/ITestAzureBlobFileSystemRename.java | 13 ++++++++----- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java index 093a7d4395645..ae24cf4a107cd 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java @@ -26,7 +26,6 @@ import java.util.concurrent.Callable; import org.junit.After; -import org.junit.Assert; import org.junit.Before; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java index 8a0adf2443454..441f3b1d81e67 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java @@ -180,11 +180,14 @@ public void testPosixRenameDirectory() throws Exception { fs.mkdirs(new Path(testDir2 + "/test4")); Assert.assertTrue(fs.rename(new Path(testDir2 + "/test1/test2/test3"), new Path(testDir2 + "/test4"))); assertPathExists(fs, "This path should exist", testDir2); - assertPathExists(fs, "This path should exist", new Path(testDir2 + "/test1/test2")); - 
assertPathExists(fs, "This path should exist",new Path(testDir2 + "/test4")); - assertPathExists(fs, "This path should exist",new Path(testDir2 + "/test4/test3")); - assertPathDoesNotExist(fs, "This path should not exist", new Path(testDir2 + - "/test1/test2/test3")); + assertPathExists(fs, "This path should exist", + new Path(testDir2 + "/test1/test2")); + assertPathExists(fs, "This path should exist", + new Path(testDir2 + "/test4")); + assertPathExists(fs, "This path should exist", + new Path(testDir2 + "/test4/test3")); + assertPathDoesNotExist(fs, "This path should not exist", + new Path(testDir2 + "/test1/test2/test3")); } @Test From f306f8853f4ff7c5061405dda9f6118cab69f1f0 Mon Sep 17 00:00:00 2001 From: sumangala Date: Sun, 18 Jul 2021 20:00:29 +0530 Subject: [PATCH 09/11] fix test failures --- .../hadoop/fs/azurebfs/ITestAbfsClient.java | 4 ++-- .../fs/azurebfs/ITestAbfsStatistics.java | 22 ++++++++++--------- .../ITestAzureBlobFileSystemBackCompat.java | 2 +- 3 files changed, 15 insertions(+), 13 deletions(-) diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsClient.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsClient.java index 1e3326693dbc7..f90d410343532 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsClient.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsClient.java @@ -93,7 +93,7 @@ public void testUnknownHost() throws Exception { public void testListPathWithValidListMaxResultsValues() throws IOException, ExecutionException, InterruptedException { final int fileCount = 10; - final Path directory = path("testWithValidListMaxResultsValues"); + final Path directory = getUniquePath("testWithValidListMaxResultsValues"); createDirectoryWithNFiles(directory, fileCount); final int[] testData = {fileCount + 100, fileCount + 1, fileCount, fileCount - 1, 1}; @@ -114,7 +114,7 @@ public void 
testListPathWithValidListMaxResultsValues() public void testListPathWithValueGreaterThanServerMaximum() throws IOException, ExecutionException, InterruptedException { setListMaxResults(LIST_MAX_RESULTS_SERVER + 100); - final Path directory = path( + final Path directory = getUniquePath( "testWithValueGreaterThanServerMaximum"); createDirectoryWithNFiles(directory, LIST_MAX_RESULTS_SERVER + 200); Assertions.assertThat(listPath(directory.toString())).describedAs( diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStatistics.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStatistics.java index 596100136e7c4..304179bc5f029 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStatistics.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStatistics.java @@ -213,12 +213,12 @@ public void testOpenAppendRenameExists() throws IOException { assertAbfsStatistics(AbfsStatistic.CALL_RENAME, 1, metricMap); //Testing if file exists at path. - assertPathExists(fs, - String.format("File with name %s should exist", destCreateFilePath), - destCreateFilePath); - assertPathDoesNotExist(fs, - String.format("File with name %s should not exist", createFilePath), - createFilePath); + assertTrue(String.format("File with name %s should exist", + destCreateFilePath), + fs.exists(destCreateFilePath)); + assertFalse(String.format("File with name %s should not exist", + createFilePath), + fs.exists(createFilePath)); metricMap = fs.getInstrumentationMap(); //Testing exists() calls. @@ -246,10 +246,12 @@ public void testOpenAppendRenameExists() throws IOException { assertTrue(fs.rename(createFilePath, destCreateFilePath)); //check if first name is existing and 2nd is not existing. 
- assertPathExists(fs, String.format("File with name %s should exist", - destCreateFilePath), destCreateFilePath); - assertPathDoesNotExist(fs, String.format( - "File with name %s should not exist", createFilePath), createFilePath); + assertTrue(String.format("File with name %s should exist", + destCreateFilePath), + fs.exists(destCreateFilePath)); + assertFalse(String.format("File with name %s should not exist", + createFilePath), + fs.exists(createFilePath)); } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemBackCompat.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemBackCompat.java index b77e5012da7dd..2941b96fefa2e 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemBackCompat.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemBackCompat.java @@ -50,7 +50,7 @@ public void testBlobBackCompat() throws Exception { CloudBlobContainer container = blobClient.getContainerReference(this.getFileSystemName()); container.createIfNotExists(); - Path testPath = path("test"); + Path testPath = getUniquePath("test"); CloudBlockBlob blockBlob = container .getBlockBlobReference(testPath + "/10/10/10"); blockBlob.uploadText(""); From 98882a01de71411e62d0108f9c02a184c2fa9f84 Mon Sep 17 00:00:00 2001 From: sumangala Date: Mon, 26 Jul 2021 11:26:42 +0530 Subject: [PATCH 10/11] close streams --- .../fs/azurebfs/ITestAbfsStatistics.java | 18 ++++----- .../ITestAzureBlobFileSystemAppend.java | 8 ++-- ...ITestAzureBlobFileSystemAuthorization.java | 6 +-- ...ITestAzureBlobFileSystemDelegationSAS.java | 2 +- .../azurebfs/ITestAzureBlobFileSystemE2E.java | 39 ++++++++++--------- .../ITestAzureBlobFileSystemFileStatus.java | 6 +-- .../ITestAzureBlobFileSystemFlush.java | 22 +++++------ .../ITestAzureBlobFileSystemMkDir.java | 2 +- .../ITestAzureBlobFileSystemOauth.java | 4 
+- .../ITestAzureBlobFileSystemPermission.java | 3 +- .../ITestAzureBlobFileSystemRename.java | 2 +- .../fs/azurebfs/ITestCustomerProvidedKey.java | 9 +++-- .../services/ITestAbfsOutputStream.java | 21 +++++----- .../services/TestAbfsInputStream.java | 3 +- 14 files changed, 74 insertions(+), 71 deletions(-) diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStatistics.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStatistics.java index 304179bc5f029..a088b9babdbd0 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStatistics.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStatistics.java @@ -93,7 +93,7 @@ public void testCreateStatistics() throws IOException { fs.mkdirs(createDirectoryPath); fs.createNonRecursive(createFilePath, FsPermission - .getDefault(), false, 1024, (short) 1, 1024, null); + .getDefault(), false, 1024, (short) 1, 1024, null).close(); Map metricMap = fs.getInstrumentationMap(); /* @@ -119,7 +119,7 @@ public void testCreateStatistics() throws IOException { fs.mkdirs(path(getMethodName() + "Dir" + i)); fs.createNonRecursive(path(getMethodName() + i), FsPermission.getDefault(), false, 1024, (short) 1, - 1024, null); + 1024, null).close(); } metricMap = fs.getInstrumentationMap(); @@ -162,7 +162,7 @@ public void testDeleteStatistics() throws IOException { files_deleted counters. */ fs.mkdirs(createDirectoryPath); - fs.create(path(createDirectoryPath + getMethodName())); + fs.create(path(createDirectoryPath + getMethodName())).close(); fs.delete(createDirectoryPath, true); Map metricMap = fs.getInstrumentationMap(); @@ -181,7 +181,7 @@ public void testDeleteStatistics() throws IOException { directories_deleted is called or not. 
*/ fs.mkdirs(createDirectoryPath); - fs.create(createFilePath); + fs.create(createFilePath).close(); fs.delete(createDirectoryPath, true); metricMap = fs.getInstrumentationMap(); @@ -201,9 +201,9 @@ public void testOpenAppendRenameExists() throws IOException { Path createFilePath = path(getMethodName()); Path destCreateFilePath = path(getMethodName() + "New"); - fs.create(createFilePath); - fs.open(createFilePath); - fs.append(createFilePath); + fs.create(createFilePath).close(); + fs.open(createFilePath).close(); + fs.append(createFilePath).close(); assertTrue(fs.rename(createFilePath, destCreateFilePath)); Map metricMap = fs.getInstrumentationMap(); @@ -227,11 +227,11 @@ public void testOpenAppendRenameExists() throws IOException { //re-initialising Abfs to reset statistic values. fs.initialize(fs.getUri(), fs.getConf()); - fs.create(destCreateFilePath); + fs.create(destCreateFilePath).close(); for (int i = 0; i < NUMBER_OF_OPS; i++) { fs.open(destCreateFilePath); - fs.append(destCreateFilePath); + fs.append(destCreateFilePath).close(); } metricMap = fs.getInstrumentationMap(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAppend.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAppend.java index 57f33a43c42e2..dbe4b42a67df3 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAppend.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAppend.java @@ -47,7 +47,7 @@ public void testAppendDirShouldFail() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); final Path filePath = path(TEST_FILE_PATH); fs.mkdirs(filePath); - fs.append(filePath, 0); + fs.append(filePath, 0).close(); } @Test @@ -69,7 +69,7 @@ public void testAppendFileAfterDelete() throws Exception { ContractTestUtils.touch(fs, filePath); fs.delete(filePath, false); - 
fs.append(filePath); + fs.append(filePath).close(); } @Test(expected = FileNotFoundException.class) @@ -77,14 +77,14 @@ public void testAppendDirectory() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); final Path folderPath = path(TEST_FOLDER_PATH); fs.mkdirs(folderPath); - fs.append(folderPath); + fs.append(folderPath).close(); } @Test public void testTracingForAppend() throws IOException { AzureBlobFileSystem fs = getFileSystem(); Path testPath = path(TEST_FILE_PATH); - fs.create(testPath); + fs.create(testPath).close(); fs.registerListener(new TracingHeaderValidator( fs.getAbfsStore().getAbfsConfiguration().getClientCorrelationId(), fs.getFileSystemId(), FSOperationType.APPEND, false, 0)); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAuthorization.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAuthorization.java index 589c3a285f48f..338cf8476afd8 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAuthorization.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAuthorization.java @@ -99,7 +99,7 @@ public void testSASTokenProviderEmptySASToken() throws Exception { this.getConfiguration().getRawConfiguration()); intercept(SASTokenProviderException.class, () -> { - testFs.create(new org.apache.hadoop.fs.Path("/testFile")); + testFs.create(new org.apache.hadoop.fs.Path("/testFile")).close(); }); } @@ -114,7 +114,7 @@ public void testSASTokenProviderNullSASToken() throws Exception { testFs.initialize(fs.getUri(), this.getConfiguration().getRawConfiguration()); intercept(SASTokenProviderException.class, ()-> { - testFs.create(new org.apache.hadoop.fs.Path("/testFile")); + testFs.create(new org.apache.hadoop.fs.Path("/testFile")).close(); }); } @@ -297,7 +297,7 @@ private void executeOp(Path reqPath, AzureBlobFileSystem fs, 
fs.listStatus(reqPath); break; case CreatePath: - fs.create(reqPath); + fs.create(reqPath).close(); break; case RenamePath: fs.rename(reqPath, diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelegationSAS.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelegationSAS.java index bc74703496543..5cba89ac4a5a6 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelegationSAS.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelegationSAS.java @@ -398,7 +398,7 @@ public void testProperties() throws Exception { public void testSignatureMask() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); String src = String.format("/testABC/test%s.xt", UUID.randomUUID()); - fs.create(new Path(src)); + fs.create(new Path(src)).close(); AbfsRestOperation abfsHttpRestOperation = fs.getAbfsClient() .renamePath(src, "/testABC" + "/abc.txt", null, getTestTracingContext(fs, false)); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2E.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2E.java index 99cce35f1f8cc..56016a39470e4 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2E.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2E.java @@ -176,12 +176,13 @@ public void testReadWithFileNotFoundException() throws Exception { final Path testFilePath = path(methodName.getMethodName()); testWriteOneByteToFile(testFilePath); - FSDataInputStream inputStream = fs.open(testFilePath, TEST_DEFAULT_BUFFER_SIZE); - fs.delete(testFilePath, true); - assertPathDoesNotExist(fs, "This path should not exist", testFilePath); + try (FSDataInputStream 
inputStream = fs.open(testFilePath, + TEST_DEFAULT_BUFFER_SIZE)) { + fs.delete(testFilePath, true); + assertPathDoesNotExist(fs, "This path should not exist", testFilePath); - intercept(FileNotFoundException.class, - () -> inputStream.read(new byte[1])); + intercept(FileNotFoundException.class, () -> inputStream.read(new byte[1])); + } } @Test @@ -189,16 +190,16 @@ public void testWriteWithFileNotFoundException() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); final Path testFilePath = path(methodName.getMethodName()); - FSDataOutputStream stream = fs.create(testFilePath); - assertPathExists(fs, "Path should exist", testFilePath); - stream.write(TEST_BYTE); + try (FSDataOutputStream stream = fs.create(testFilePath)) { + assertPathExists(fs, "Path should exist", testFilePath); + stream.write(TEST_BYTE); - fs.delete(testFilePath, true); - assertPathDoesNotExist(fs, "This path should not exist", testFilePath); + fs.delete(testFilePath, true); + assertPathDoesNotExist(fs, "This path should not exist", testFilePath); - // trigger append call - intercept(FileNotFoundException.class, - () -> stream.close()); + // trigger append call + intercept(FileNotFoundException.class, () -> stream.close()); + } } @Test @@ -209,14 +210,14 @@ public void testFlushWithFileNotFoundException() throws Exception { return; } - FSDataOutputStream stream = fs.create(testFilePath); - assertPathExists(fs, "This path should exist", testFilePath); + try (FSDataOutputStream stream = fs.create(testFilePath)) { + assertPathExists(fs, "This path should exist", testFilePath); - fs.delete(testFilePath, true); - assertPathDoesNotExist(fs, "This path should not exist", testFilePath); + fs.delete(testFilePath, true); + assertPathDoesNotExist(fs, "This path should not exist", testFilePath); - intercept(FileNotFoundException.class, - () -> stream.close()); + intercept(FileNotFoundException.class, () -> stream.close()); + } } private void testWriteOneByteToFile(Path testFilePath) throws 
Exception { diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFileStatus.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFileStatus.java index 4a8528ed7b44f..4fa7a0fca68ae 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFileStatus.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFileStatus.java @@ -112,10 +112,10 @@ public void testAbfsPathWithHost() throws IOException { Path pathwithouthost2 = new Path("/abfs/file2.txt"); // verify compatibility of this path format - fs.create(pathWithHost1); + fs.create(pathWithHost1).close(); assertPathExists(fs, "This path should exist", pathwithouthost1); - fs.create(pathwithouthost2); + fs.create(pathwithouthost2).close(); assertPathExists(fs, "This path should exist", pathWithHost2); // verify get @@ -135,7 +135,7 @@ public void testLastModifiedTime() throws IOException { // Dividing and multiplying by 1000 to make last 3 digits 0. // It is observed that modification time is returned with last 3 // digits 0 always. 
- fs.create(testFilePath); + fs.create(testFilePath).close(); long createEndTime = System.currentTimeMillis(); FileStatus fStat = fs.getFileStatus(testFilePath); long lastModifiedTime = fStat.getModificationTime(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFlush.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFlush.java index 40a551cd60a30..9ac1aa5d38a66 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFlush.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFlush.java @@ -316,14 +316,13 @@ public void testTracingHeaderForAppendBlob() throws Exception { byte[] buf = new byte[10]; new Random().nextBytes(buf); - FSDataOutputStream out = fs.create(new Path("/testFile")); - ((AbfsOutputStream) out.getWrappedStream()).registerListener( - new TracingHeaderValidator( - fs.getAbfsStore().getAbfsConfiguration().getClientCorrelationId(), - fs.getFileSystemId(), FSOperationType.WRITE, false, 0, - ((AbfsOutputStream) out.getWrappedStream()).getStreamID())); - out.write(buf); - out.hsync(); + try (FSDataOutputStream out = fs.create(new Path("/testFile"))) { + ((AbfsOutputStream) out.getWrappedStream()).registerListener(new TracingHeaderValidator( + fs.getAbfsStore().getAbfsConfiguration().getClientCorrelationId(), fs.getFileSystemId(), FSOperationType.WRITE, false, 0, + ((AbfsOutputStream) out.getWrappedStream()).getStreamID())); + out.write(buf); + out.hsync(); + } } @Test @@ -383,9 +382,10 @@ private byte[] getRandomBytesArray() { private FSDataOutputStream getStreamAfterWrite(AzureBlobFileSystem fs, Path path, byte[] buffer, boolean enableFlush) throws IOException { fs.getAbfsStore().getAbfsConfiguration().setEnableFlush(enableFlush); - FSDataOutputStream stream = fs.create(path); - stream.write(buffer); - return stream; + try (FSDataOutputStream 
stream = fs.create(path)) { + stream.write(buffer); + return stream; + } } private void validate(InputStream stream, byte[] writeBuffer, boolean isEqual) diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemMkDir.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemMkDir.java index 1d898ebd355b4..bc6f35c66bc53 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemMkDir.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemMkDir.java @@ -79,7 +79,7 @@ DEFAULT_FS_AZURE_ENABLE_MKDIR_OVERWRITE && getIsNamespaceEnabled( getFileSystem())); final AzureBlobFileSystem fs = getFileSystem(); Path path = path("testFilePath"); - fs.create(path); + fs.create(path).close(); assertTrue(fs.getFileStatus(path).isFile()); intercept(FileAlreadyExistsException.class, () -> fs.mkdirs(path)); } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemOauth.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemOauth.java index 33ad812ada804..6f12d81e2b1ee 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemOauth.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemOauth.java @@ -158,7 +158,7 @@ public void testBlobDataReader() throws Exception { // TEST WRITE FILE try { abfsStore.openFileForWrite(existedFilePath, fs.getFsStatistics(), true, - tracingContext); + tracingContext).close(); } catch (AbfsRestOperationException e) { assertEquals(AzureServiceErrorCode.AUTHORIZATION_PERMISSION_MISS_MATCH, e.getErrorCode()); } finally { @@ -171,7 +171,7 @@ private void prepareFiles(Path existedFilePath, Path existedFolderPath) throws I // create test files/folders to verify access control diff 
between // Blob data contributor and Blob data reader final AzureBlobFileSystem fs = this.getFileSystem(); - fs.create(existedFilePath); + fs.create(existedFilePath).close(); assertPathExists(fs, "This path should exist", existedFilePath); fs.mkdirs(existedFolderPath); assertPathExists(fs, "This path should exist", existedFolderPath); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemPermission.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemPermission.java index 138e2023786bc..0d644b6c743d0 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemPermission.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemPermission.java @@ -84,7 +84,8 @@ public void testFilePermission() throws Exception { new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE)); fs.removeDefaultAcl(path.getParent()); - fs.create(path, permission, true, KILOBYTE, (short) 1, KILOBYTE - 1, null); + fs.create(path, permission, true, KILOBYTE, (short) 1, KILOBYTE - 1, + null).close(); FileStatus status = fs.getFileStatus(path); Assert.assertEquals(permission.applyUMask(DEFAULT_UMASK_PERMISSION), status.getPermission()); } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java index 441f3b1d81e67..b12af5b0826ab 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java @@ -313,7 +313,7 @@ private void testRenameTimeout( when(op.getResult()).thenReturn(http400Op); } else if (renameRequestStatus == HTTP_NOT_FOUND) { // Create the file new. 
- fs.create(destinationPath); + fs.create(destinationPath).close(); when(op.getResult()).thenReturn(http404Op); if (isOldOp) { diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestCustomerProvidedKey.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestCustomerProvidedKey.java index 59056c1ffe8a9..b7b1a3bba2db6 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestCustomerProvidedKey.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestCustomerProvidedKey.java @@ -553,14 +553,15 @@ private void testFlush(final boolean isWithCPK) throws Exception { final AzureBlobFileSystem fs = getAbfs(isWithCPK); final String testFileName = path("/" + methodName.getMethodName()) .toString(); - fs.create(new Path(testFileName)); + fs.create(new Path(testFileName)).close(); AbfsClient abfsClient = fs.getAbfsClient(); String expectedCPKSha = getCPKSha(fs); byte[] fileContent = getRandomBytesArray(FILE_SIZE); Path testFilePath = new Path(testFileName + "1"); - FSDataOutputStream oStream = fs.create(testFilePath); - oStream.write(fileContent); + try (FSDataOutputStream oStream = fs.create(testFilePath)) { + oStream.write(fileContent); + } // Trying to read with different CPK headers Configuration conf = fs.getConf(); @@ -800,7 +801,7 @@ private void testCheckAccess(final boolean isWithCPK) throws Exception { final AzureBlobFileSystem fs = getAbfs(isWithCPK); final String testFileName = path("/" + methodName.getMethodName()) .toString(); - fs.create(new Path(testFileName)); + fs.create(new Path(testFileName)).close(); AbfsClient abfsClient = fs.getAbfsClient(); AbfsRestOperation abfsRestOperation = abfsClient .checkAccess(testFileName, "rwx", getTestTracingContext(fs, false)); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsOutputStream.java 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsOutputStream.java index 5b1a014ee7ade..431c456ae3daa 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsOutputStream.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsOutputStream.java @@ -70,19 +70,18 @@ public void testMaxRequestsAndQueueCapacity() throws Exception { conf.set(ConfigurationKeys.AZURE_WRITE_MAX_REQUESTS_TO_QUEUE, "" + maxRequestsToQueue); final AzureBlobFileSystem fs = getFileSystem(conf); - FSDataOutputStream out = fs.create(path(TEST_FILE_PATH)); - AbfsOutputStream stream = (AbfsOutputStream) out.getWrappedStream(); + try (FSDataOutputStream out = fs.create(path(TEST_FILE_PATH))) { + AbfsOutputStream stream = (AbfsOutputStream) out.getWrappedStream(); - if (stream.isAppendBlobStream()) { - maxConcurrentRequests = 1; - } + if (stream.isAppendBlobStream()) { + maxConcurrentRequests = 1; + } - Assertions.assertThat(stream.getMaxConcurrentRequestCount()) - .describedAs("maxConcurrentRequests should be " + maxConcurrentRequests) - .isEqualTo(maxConcurrentRequests); - Assertions.assertThat(stream.getMaxRequestsThatCanBeQueued()) - .describedAs("maxRequestsToQueue should be " + maxRequestsToQueue) - .isEqualTo(maxRequestsToQueue); + Assertions.assertThat(stream.getMaxConcurrentRequestCount()).describedAs( + "maxConcurrentRequests should be " + maxConcurrentRequests).isEqualTo(maxConcurrentRequests); + Assertions.assertThat(stream.getMaxRequestsThatCanBeQueued()).describedAs("maxRequestsToQueue should be " + maxRequestsToQueue) + .isEqualTo(maxRequestsToQueue); + } } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsInputStream.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsInputStream.java index b37b320a70a44..62326e0dbb353 100644 --- 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsInputStream.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsInputStream.java @@ -584,12 +584,13 @@ public void testDefaultReadaheadQueueDepth() throws Exception { config.unset(FS_AZURE_READ_AHEAD_QUEUE_DEPTH); AzureBlobFileSystem fs = getFileSystem(config); Path testFile = path("/testFile"); - fs.create(testFile); + fs.create(testFile).close(); FSDataInputStream in = fs.open(testFile); Assertions.assertThat( ((AbfsInputStream) in.getWrappedStream()).getReadAheadQueueDepth()) .describedAs("readahead queue depth should be set to default value 2") .isEqualTo(2); + in.close(); } From 9004aeb7db040943f7a561d378c3d69e9289534e Mon Sep 17 00:00:00 2001 From: sumangala Date: Mon, 26 Jul 2021 20:12:49 +0530 Subject: [PATCH 11/11] test fix --- .../org/apache/hadoop/fs/azurebfs/ITestAbfsStatistics.java | 2 -- .../hadoop/fs/azurebfs/ITestAzureBlobFileSystemFlush.java | 7 +++---- .../hadoop/fs/azurebfs/ITestAzureBlobFileSystemOauth.java | 2 +- 3 files changed, 4 insertions(+), 7 deletions(-) diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStatistics.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStatistics.java index a088b9babdbd0..98162fee08e9f 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStatistics.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStatistics.java @@ -31,8 +31,6 @@ import static org.apache.hadoop.fs.CommonConfigurationKeys.IOSTATISTICS_LOGGING_LEVEL; import static org.apache.hadoop.fs.CommonConfigurationKeys.IOSTATISTICS_LOGGING_LEVEL_INFO; -import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathExists; -import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathDoesNotExist; /** * Tests AzureBlobFileSystem Statistics. 
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFlush.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFlush.java index 9ac1aa5d38a66..d27f9fa62194d 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFlush.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFlush.java @@ -382,10 +382,9 @@ private byte[] getRandomBytesArray() { private FSDataOutputStream getStreamAfterWrite(AzureBlobFileSystem fs, Path path, byte[] buffer, boolean enableFlush) throws IOException { fs.getAbfsStore().getAbfsConfiguration().setEnableFlush(enableFlush); - try (FSDataOutputStream stream = fs.create(path)) { - stream.write(buffer); - return stream; - } + FSDataOutputStream stream = fs.create(path); + stream.write(buffer); + return stream; } private void validate(InputStream stream, byte[] writeBuffer, boolean isEqual) diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemOauth.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemOauth.java index 6f12d81e2b1ee..f27e75839b73f 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemOauth.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemOauth.java @@ -158,7 +158,7 @@ public void testBlobDataReader() throws Exception { // TEST WRITE FILE try { abfsStore.openFileForWrite(existedFilePath, fs.getFsStatistics(), true, - tracingContext).close(); + tracingContext); } catch (AbfsRestOperationException e) { assertEquals(AzureServiceErrorCode.AUTHORIZATION_PERMISSION_MISS_MATCH, e.getErrorCode()); } finally {