From ca7a8795bc4a64befedddd0c158b255b163f9d01 Mon Sep 17 00:00:00 2001 From: Steve Loughran Date: Fri, 3 Jan 2020 16:46:44 +0000 Subject: [PATCH 1/5] HADOOP-16785: wasb to raise IOE if write() invoked on a closed stream with * test * bit of extra asserts to help debug config * and LTU.intercept to declare class of expected exception when none was raised. Change-Id: I37951f0a3881d9b9849c4a51159d6670583f65c1 --- .../apache/hadoop/test/LambdaTestUtils.java | 2 +- .../fs/azure/NativeAzureFileSystem.java | 14 +++++++++++++ .../hadoop/fs/azure/AbstractWasbTestBase.java | 2 +- ...tFileSystemOperationExceptionHandling.java | 20 +++++++++++++++++++ 4 files changed, 36 insertions(+), 2 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java index db36154c158ac..ad265afc3a022 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java @@ -406,7 +406,7 @@ public static E intercept( throws Exception { try { eval.call(); - throw new AssertionError("Expected an exception"); + throw new AssertionError("Expected an exception of type " + clazz); } catch (Throwable e) { if (clazz.isAssignableFrom(e.getClass())) { return (E)e; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java index 93c54d386f716..9955346f66e1d 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java @@ -1083,6 +1083,7 @@ public synchronized void close() throws IOException { */ @Override public void write(int b) throws IOException { + checkOpen(); try { out.write(b); } catch(IOException e) { @@ -1106,6 +1107,7 @@ public void write(int b) throws IOException { */ @Override public void write(byte[] b) throws IOException { + checkOpen(); try { out.write(b); } catch(IOException e) { @@ -1136,6 +1138,7 @@ public void write(byte[] b) throws IOException { */ @Override public void write(byte[] b, int off, int len) throws IOException { + checkOpen(); try { out.write(b, off, len); } catch(IOException e) { @@ -1198,6 +1201,17 @@ public void setEncodedKey(String anEncodedKey) { private void restoreKey() throws IOException { store.rename(getEncodedKey(), getKey()); } + + /** + * Check for the stream being open. + * @throws IOException if the stream is closed. 
+ */ + private void checkOpen() throws IOException { + if (out == null) { + throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED); + } + } + } private URI uri; diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AbstractWasbTestBase.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AbstractWasbTestBase.java index 0d3a06c36f2da..d5c1dce8cd9ab 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AbstractWasbTestBase.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AbstractWasbTestBase.java @@ -52,7 +52,7 @@ public abstract class AbstractWasbTestBase extends AbstractWasbTestWithTimeout @Before public void setUp() throws Exception { AzureBlobStorageTestAccount account = createTestAccount(); - assumeNotNull(account); + assumeNotNull("test account", account); bindToTestAccount(account); } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationExceptionHandling.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationExceptionHandling.java index a45dae48918df..7c437f3bc5140 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationExceptionHandling.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationExceptionHandling.java @@ -19,6 +19,7 @@ package org.apache.hadoop.fs.azure; import java.io.FileNotFoundException; +import java.io.IOException; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; @@ -30,7 +31,9 @@ import org.junit.After; import org.junit.Test; +import static org.apache.hadoop.fs.FSExceptionMessages.STREAM_IS_CLOSED; import static org.apache.hadoop.fs.azure.ExceptionHandlingTestHelper.*; +import static org.apache.hadoop.test.LambdaTestUtils.intercept; /** * Single threaded exception handling. @@ -265,6 +268,23 @@ public void testSingleThreadedPageBlobOpenScenario() throws Throwable { inputStream = fs.open(testPath); } + /** + * Attempts to write to the azure stream after it is closed will raise + * an IOException. + */ + @Test + public void testWriteAfterClose() throws Throwable { + FSDataOutputStream out = fs.create(testPath); + out.close(); + intercept(IOException.class, STREAM_IS_CLOSED, + () -> out.write('a')); + intercept(IOException.class, STREAM_IS_CLOSED, + () -> out.write(new byte[]{'a'})); + out.hsync(); + out.flush(); + out.close(); + } + @After public void tearDown() throws Exception { if (inputStream != null) { From 15e88b0baeaec1b7306d73412bcbaf1b40c22fab Mon Sep 17 00:00:00 2001 From: Steve Loughran Date: Mon, 6 Jan 2020 14:26:16 +0000 Subject: [PATCH 2/5] HADOOP-16785: replicate test to verify ABFS is robust here. It is, but hsync() fails on a closed file. Commented out that operation. It does handle flush(), which is good as some apps do call it on closed files. 
Change-Id: I25414cc500feb9391dc520e5292250945d6576e6 --- .../ITestAzureBlobFileSystemCreate.java | 20 +++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java index ab01166b9b321..d421cff468ef8 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java @@ -19,6 +19,7 @@ package org.apache.hadoop.fs.azurebfs; import java.io.FileNotFoundException; +import java.io.IOException; import java.util.EnumSet; import org.junit.Test; @@ -29,6 +30,7 @@ import org.apache.hadoop.fs.permission.FsPermission; import static org.apache.hadoop.fs.contract.ContractTestUtils.assertIsFile; +import static org.apache.hadoop.test.LambdaTestUtils.intercept; /** * Test create operation. @@ -104,4 +106,22 @@ public void testCreateNonRecursive2() throws Exception { .close(); assertIsFile(fs, testFile); } + + /** + * Attempts to write to the azure stream after it is closed will raise + * an IOException. + */ + @Test + public void testWriteAfterClose() throws Throwable { + final AzureBlobFileSystem fs = getFileSystem(); + Path testPath = new Path(TEST_FOLDER_PATH, TEST_CHILD_FILE); + FSDataOutputStream out = fs.create(testPath); + out.close(); + intercept(IOException.class, () -> out.write('a')); + intercept(IOException.class, () -> out.write(new byte[]{'a'})); + // hsync is not ignored on a closed stream + // out.hsync();Are you + out.flush(); + out.close(); + } } From a675fe2ec49f5471b702ea56788e6177f430ffa8 Mon Sep 17 00:00:00 2001 From: Steve Loughran Date: Mon, 6 Jan 2020 14:44:05 +0000 Subject: [PATCH 3/5] HADOOP-16785: abfs test case to look at FilterOutputStream double close Change-Id: I26af214bd40b28aa006d27e6ef22b8cefb39a799 --- .../ITestAzureBlobFileSystemCreate.java | 24 ++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java index d421cff468ef8..e2c9b4b37d980 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java @@ -19,6 +19,7 @@ package org.apache.hadoop.fs.azurebfs; import java.io.FileNotFoundException; +import java.io.FilterOutputStream; import java.io.IOException; import java.util.EnumSet; @@ -108,8 +109,7 @@ public void testCreateNonRecursive2() throws Exception { } /** - * Attempts to write to the azure stream after it is closed will raise - * an IOException. + * Attempts to use to the ABFS stream after it is closed. */ @Test public void testWriteAfterClose() throws Throwable { @@ -120,7 +120,25 @@ public void testWriteAfterClose() throws Throwable { intercept(IOException.class, () -> out.write('a')); intercept(IOException.class, () -> out.write(new byte[]{'a'})); // hsync is not ignored on a closed stream - // out.hsync();Are you + // out.hsync(); + out.flush(); + out.close(); + } + + /** + * Attempts to double close an ABFS output stream from within a + * FilterOutputStream. 
+ * That class handles a double failure on close badly if the second + * exception rethrows the first. + */ + @Test + public void testFilteredDoubleClose() throws Throwable { + final AzureBlobFileSystem fs = getFileSystem(); + Path testPath = new Path(TEST_FOLDER_PATH, TEST_CHILD_FILE); + FilterOutputStream out = new FilterOutputStream(fs.create(testPath)); + out.close(); + intercept(IOException.class, () -> out.write('a')); + intercept(IOException.class, () -> out.write(new byte[]{'a'})); out.flush(); out.close(); } From a2c8541b0812f523aebfc4af20a8490b44c2decc Mon Sep 17 00:00:00 2001 From: Steve Loughran Date: Mon, 6 Jan 2020 15:52:36 +0000 Subject: [PATCH 4/5] HADOOP-16785 abfs to self suppression of IOE in try-with-resources Change-Id: I5ffa417219249e102bf7e861d357b57af3b780f0 --- .../hadoop/fs/azurebfs/services/AbfsOutputStream.java | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java index 2d409416e8647..5ee0208018324 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java @@ -259,7 +259,15 @@ public synchronized void close() throws IOException { } private synchronized void flushInternal(boolean isClose) throws IOException { - maybeThrowLastError(); + try { + maybeThrowLastError(); + } catch (IOException e) { + if (isClose) { + // wrap existing exception so as to avoid breaking try-with-resources + throw new IOException("Skipping final flush and write due to " + e, e); + } else + throw e; + } writeCurrentBufferToService(); flushWrittenBytesToService(isClose); } From cc1b5803629a90e8c9d1cc72fb9ae45582b30b9a Mon Sep 17 00:00:00 2001 From: Steve Loughran Date: Tue, 7 Jan 2020 17:18:39 +0000 Subject: [PATCH 5/5] HADOOP-16875: AbfsOutputStream.close() wraps IOEs with a new instance of the same class. This moves the wrapping logic out of flushInternal into close(), using IOUtils.wrapException to ensure the type of the wrapped exception is the same as that caught (without that the E2E exceptions fail). the test uses try-with-resources as suggested by Thomas; we delete a file while it is being written; this causes hsync to fail -with close() also throwing an exception which is then caught and suppressed. Change-Id: If1bcd2885bccbf39de888b8295a5a7e529935f2d --- .../azurebfs/services/AbfsOutputStream.java | 18 ++++++------ .../ITestAzureBlobFileSystemCreate.java | 29 ++++++++++++++----- 2 files changed, 30 insertions(+), 17 deletions(-) diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java index 5ee0208018324..7e9746d118ce8 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java @@ -43,6 +43,8 @@ import org.apache.hadoop.fs.StreamCapabilities; import org.apache.hadoop.fs.Syncable; +import static org.apache.hadoop.io.IOUtils.wrapException; + /** * The BlobFsOutputStream for Rest AbfsClient. 
*/ @@ -246,6 +248,12 @@ public synchronized void close() throws IOException { try { flushInternal(true); threadExecutor.shutdown(); + } catch (IOException e) { + // Problems surface in try-with-resources clauses if + // the exception thrown in a close == the one already thrown + // -so we wrap any exception with a new one. + // See HADOOP-16785 + throw wrapException(path, e.getMessage(), e); } finally { lastError = new IOException(FSExceptionMessages.STREAM_IS_CLOSED); buffer = null; @@ -259,15 +267,7 @@ public synchronized void close() throws IOException { } private synchronized void flushInternal(boolean isClose) throws IOException { - try { - maybeThrowLastError(); - } catch (IOException e) { - if (isClose) { - // wrap existing exception so as to avoid breaking try-with-resources - throw new IOException("Skipping final flush and write due to " + e, e); - } else - throw e; - } + maybeThrowLastError(); writeCurrentBufferToService(); flushWrittenBytesToService(isClose); } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java index e2c9b4b37d980..d9ac03ecf648f 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java @@ -19,7 +19,6 @@ package org.apache.hadoop.fs.azurebfs; import java.io.FileNotFoundException; -import java.io.FilterOutputStream; import java.io.IOException; import java.util.EnumSet; @@ -29,6 +28,7 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.test.GenericTestUtils; import static org.apache.hadoop.fs.contract.ContractTestUtils.assertIsFile; import static org.apache.hadoop.test.LambdaTestUtils.intercept; @@ -132,14 +132,27 @@ public void testWriteAfterClose() throws Throwable { * exception rethrows the first. */ @Test - public void testFilteredDoubleClose() throws Throwable { + public void testTryWithResources() throws Throwable { final AzureBlobFileSystem fs = getFileSystem(); Path testPath = new Path(TEST_FOLDER_PATH, TEST_CHILD_FILE); - FilterOutputStream out = new FilterOutputStream(fs.create(testPath)); - out.close(); - intercept(IOException.class, () -> out.write('a')); - intercept(IOException.class, () -> out.write(new byte[]{'a'})); - out.flush(); - out.close(); + try (FSDataOutputStream out = fs.create(testPath)) { + out.write('1'); + out.hsync(); + // this will cause the next write to failAll + fs.delete(testPath, false); + out.write('2'); + out.hsync(); + fail("Expected a failure"); + } catch (FileNotFoundException fnfe) { + // the exception raised in close() must be in the caught exception's + // suppressed list + Throwable[] suppressed = fnfe.getSuppressed(); + assertEquals("suppressed count", 1, suppressed.length); + Throwable inner = suppressed[0]; + if (!(inner instanceof IOException)) { + throw inner; + } + GenericTestUtils.assertExceptionContains(fnfe.getMessage(), inner); + } } }
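
Background on the close() wrapping in patch 5 (an illustration only, not part of the
patches; the class and method names below are invented for the sketch): Java's
try-with-resources treats the exception thrown by the block body as the primary
failure and then calls primary.addSuppressed(closeException). If close() rethrows
the very same Throwable instance that the body already threw, which is what a
stream that caches lastError and rethrows it via maybeThrowLastError() can end up
doing, addSuppressed() rejects the call with
IllegalArgumentException("Self-suppression not permitted") and the caller loses the
original failure. The standalone sketch below reproduces that behaviour with a
plain OutputStream.

import java.io.IOException;
import java.io.OutputStream;

public class SelfSuppressionDemo {

  /** Stream that records a failure and rethrows the same instance on close. */
  static class RememberingStream extends OutputStream {
    private IOException lastError;

    @Override
    public void write(int b) throws IOException {
      lastError = new IOException("simulated write failure");
      throw lastError;
    }

    @Override
    public void close() throws IOException {
      if (lastError != null) {
        // Rethrowing the identical instance is what breaks try-with-resources.
        // Wrapping it in a new exception, as the patched close() does via
        // IOUtils.wrapException, keeps the original as the cause instead.
        throw lastError;
      }
    }
  }

  public static void main(String[] args) {
    try (RememberingStream out = new RememberingStream()) {
      out.write('a');
    } catch (IOException e) {
      // With wrapping in close(), this is the branch a caller sees.
      System.out.println("IOException: " + e);
    } catch (IllegalArgumentException e) {
      // Without wrapping, addSuppressed(self) fails and the real error is lost.
      System.out.println("masked by: " + e.getMessage());
    }
  }
}

Wrapping with IOUtils.wrapException(path, e.getMessage(), e) rather than a bare
new IOException(...) matters because, as the patch 5 commit message notes, it keeps
the type of the wrapped exception the same as the one caught, so callers and tests
that match on the exception class still see the expected type.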