diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
index a2273bf83343b..2603ae342a2ad 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
@@ -33,6 +33,7 @@
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.ByteBufferPositionedReadable;
 import org.apache.hadoop.fs.ByteBufferReadable;
 import org.apache.hadoop.fs.CanSetDropBehind;
 import org.apache.hadoop.fs.CanSetReadahead;
@@ -328,20 +329,40 @@ public int read(long position, byte[] buffer, int offset, int length)
       throws IOException {
     checkStream();
     try {
-      final int n = ((PositionedReadable) in).read(position, buffer, offset, 
+      final int n = ((PositionedReadable) in).read(position, buffer, offset,
           length);
       if (n > 0) {
         // This operation does not change the current offset of the file
         decrypt(position, buffer, offset, n);
       }
-      
+
       return n;
     } catch (ClassCastException e) {
       throw new UnsupportedOperationException("This stream does not support " +
           "positioned read.");
     }
   }
-  
+
+  /**
+   * Positioned readFully using {@link ByteBuffer}s. This method is thread-safe.
+   */
+  // Not annotated @Override because CryptoInputStream does not (yet)
+  // implement ByteBufferPositionedReadable, though the signature matches it.
+  public void readFully(long position, final ByteBuffer buf)
+      throws IOException {
+    checkStream();
+    if (!(in instanceof ByteBufferPositionedReadable)) {
+      throw new UnsupportedOperationException(in.getClass().getCanonicalName()
+          + " does not support positioned reads with byte buffers.");
+    }
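+    // Record the starting position so we know how many bytes the wrapped
+    // stream wrote into buf, and where in buf decryption should begin.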
+    int bufPos = buf.position();
+    ((ByteBufferPositionedReadable) in).readFully(position, buf);
+    final int n = buf.position() - bufPos;
+    if (n > 0) {
+      // This operation does not change the current offset of the file
+      decrypt(position, buf, n, bufPos);
+    }
+  }
+
   /**
    * Decrypt length bytes in buffer starting at offset. Output is also put 
    * into buffer starting at offset. It is thread-safe.
@@ -375,7 +396,7 @@ private void decrypt(long position, byte[] buffer, int offset, int length)
       returnDecryptor(decryptor);
     }
   }
-  
+
   /** Positioned read fully. It is thread-safe */
   @Override
   public void readFully(long position, byte[] buffer, int offset, int length)
@@ -407,7 +428,7 @@ public void seek(long pos) throws IOException {
     checkStream();
     try {
       /*
-       * If data of target pos in the underlying stream has already been read 
+       * If data of target pos in the underlying stream has already been read
        * and decrypted in outBuffer, we just need to re-position outBuffer.
        */
       if (pos <= streamOffset && pos >= (streamOffset - outBuffer.remaining())) {
@@ -523,7 +544,7 @@ public int read(ByteBuffer buf) throws IOException {
    * Output is also buf and same start position.
    * buf.position() and buf.limit() should be unchanged after decryption.
    */
-  private void decrypt(ByteBuffer buf, int n, int start) 
+  private void decrypt(ByteBuffer buf, int n, int start)
       throws IOException {
     final int pos = buf.position();
     final int limit = buf.limit();
@@ -545,7 +566,52 @@ private void decrypt(ByteBuffer buf, int n, int start)
     }
     buf.position(pos);
   }
-  
+
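+  /**
+   * Decrypt {@code length} bytes in {@code buf} starting at {@code start},
+   * which were read from position {@code filePosition} in the underlying
+   * stream. Output overwrites the same region of the buffer. The buffer is
+   * duplicated internally, so the caller's position and limit are unchanged.
+   * It is thread-safe.
+   */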
+  private void decrypt(long filePosition, ByteBuffer buf, int length, int start)
+          throws IOException {
+    ByteBuffer localInBuffer = null;
+    ByteBuffer localOutBuffer = null;
+
+    // Duplicate the buffer so we don't have to worry about resetting the
+    // original position and limit at the end of the method
+    buf = buf.duplicate();
+
+    int decryptedBytes = 0;
+    Decryptor localDecryptor = null;
+    try {
+      localInBuffer = getBuffer();
+      localOutBuffer = getBuffer();
+      localDecryptor = getDecryptor();
+      byte[] localIV = initIV.clone();
+      updateDecryptor(localDecryptor, filePosition, localIV);
+      byte localPadding = getPadding(filePosition);
+      // Position the input buffer past the padding so input data follows it.
+      localInBuffer.position(localPadding);
+
+      while (decryptedBytes < length) {
+        buf.position(start + decryptedBytes);
+        buf.limit(start + decryptedBytes +
+                Math.min(length - decryptedBytes, localInBuffer.remaining()));
+        localInBuffer.put(buf);
+        // Do decryption
+        try {
+          decrypt(localDecryptor, localInBuffer, localOutBuffer, localPadding);
+          buf.position(start + decryptedBytes);
+          buf.limit(start + length);
+          decryptedBytes += localOutBuffer.remaining();
+          buf.put(localOutBuffer);
+        } finally {
+          localPadding = afterDecryption(localDecryptor, localInBuffer,
+              filePosition + decryptedBytes, localIV);
+        }
+      }
+    } finally {
+      returnBuffer(localInBuffer);
+      returnBuffer(localOutBuffer);
+      returnDecryptor(localDecryptor);
+    }
+  }
+
   @Override
   public int available() throws IOException {
     checkStream();
@@ -605,7 +671,7 @@ public ByteBuffer read(ByteBufferPool bufferPool, int maxLength,
       }
       return buffer;
     } catch (ClassCastException e) {
-      throw new UnsupportedOperationException("This stream does not support " + 
+      throw new UnsupportedOperationException("This stream does not support " +
           "enhanced byte buffer access.");
     }
   }
@@ -740,6 +806,7 @@ public boolean hasCapability(String capability) {
     case StreamCapabilities.READAHEAD:
     case StreamCapabilities.DROPBEHIND:
     case StreamCapabilities.UNBUFFER:
+    case StreamCapabilities.READBYTEBUFFER:
+    case StreamCapabilities.PREADBYTEBUFFER:
       return true;
     default:
       return false;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ByteBufferPositionedReadable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ByteBufferPositionedReadable.java
new file mode 100644
index 0000000000000..f8282d88c46c3
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ByteBufferPositionedReadable.java
@@ -0,0 +1,90 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.io.EOFException;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Implementers of this interface provide a positioned read API that writes to a
+ * {@link ByteBuffer} rather than a {@code byte[]}.
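+ *
+ * A minimal usage sketch follows; the file system, path, offset, and buffer
+ * size are illustrative only, not part of this interface:
+ * <pre>{@code
+ *   FSDataInputStream in = fs.open(path);
+ *   ByteBuffer buf = ByteBuffer.allocate(4096);
+ *   if (in.hasCapability(StreamCapabilities.PREADBYTEBUFFER)) {
+ *     // pread does not change the stream's current offset
+ *     int n = in.read(1024L, buf);
+ *   }
+ * }</pre>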
+ *
+ * @see PositionedReadable
+ * @see ByteBufferReadable
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public interface ByteBufferPositionedReadable {
+  /**
+   * Reads up to {@code buf.remaining()} bytes into buf from a given position
+   * in the file and returns the number of bytes read. Callers should use
+   * {@code buf.limit(...)} to control the size of the desired read and
+   * {@code buf.position(...)} to control the offset into the buffer the data
+   * should be written to.
+   * 
+   * After a successful call, {@code buf.position()} will be advanced by the
+   * number of bytes read and {@code buf.limit()} will be unchanged.
+   * 
+   * In the case of an exception, the state of the buffer (the contents of the
+   * buffer, the {@code buf.position()}, the {@code buf.limit()}, etc.) is
+   * undefined, and callers should be prepared to recover from this
+   * eventuality.
+   * 
+   * Callers should use {@link StreamCapabilities#hasCapability(String)} with
+   * {@link StreamCapabilities#PREADBYTEBUFFER} to check if the underlying
+   * stream supports this interface; otherwise they might get an
+   * {@link UnsupportedOperationException}.
+   * 
+   * Implementations should treat 0-length requests as legitimate, and must not
+   * signal an error upon their receipt.
+   * 
+   * This does not change the current offset of a file, and is thread-safe.
+   *
+   * @param position position within file
+   * @param buf the ByteBuffer to receive the results of the read operation.
+   * @return the number of bytes read, possibly zero, or -1 if the end of
+   *         the stream has been reached
+   * @throws IOException if there is some error performing the read
+   */
+  int read(long position, ByteBuffer buf) throws IOException;
+
+  /**
+   * Reads {@code buf.remaining()} bytes into buf from a given position in
+   * the file. Callers should use {@code buf.limit(...)} to
+   * control the size of the desired read and {@code buf.position(...)} to
+   * control the offset into the buffer the data should be written to.
+   * 
+   * This operation provides similar semantics to
+   * {@link #read(long, ByteBuffer)}; the difference is that this method is
+   * guaranteed to read data until the {@link ByteBuffer} is full, or an
+   * {@link EOFException} is thrown if the end of the data stream is
+   * reached first.
+   *
+   * @param position position within file
+   * @param buf the ByteBuffer to receive the results of the read operation.
+   * @throws IOException if there is some error performing the read
+   * @throws EOFException if the end of the data was reached before
+   * the read operation completed
+   * @see #read(long, ByteBuffer)
+   */
+  void readFully(long position, ByteBuffer buf) throws IOException;
+}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java
index 08d71f16c0783..31f82975899e1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.fs;
 
 import java.io.DataInputStream;
+import java.io.EOFException;
 import java.io.FileDescriptor;
 import java.io.FileInputStream;
 import java.io.IOException;
@@ -38,7 +39,8 @@
 public class FSDataInputStream extends DataInputStream
     implements Seekable, PositionedReadable, 
       ByteBufferReadable, HasFileDescriptor, CanSetDropBehind, CanSetReadahead,
-      HasEnhancedByteBufferAccess, CanUnbuffer, StreamCapabilities {
+      HasEnhancedByteBufferAccess, CanUnbuffer, StreamCapabilities,
+      ByteBufferPositionedReadable {
   /**
    * Map ByteBuffers that we have handed out to readers to ByteBufferPool 
    * objects
@@ -50,8 +52,8 @@ public class FSDataInputStream extends DataInputStream
   public FSDataInputStream(InputStream in) {
     super(in);
     if( !(in instanceof Seekable) || !(in instanceof PositionedReadable) ) {
-      throw new IllegalArgumentException(
-          "In is not an instance of Seekable or PositionedReadable");
+      throw new IllegalArgumentException(in.getClass().getCanonicalName() +
+          " is not an instance of Seekable or PositionedReadable");
     }
   }
   
@@ -147,7 +149,8 @@ public int read(ByteBuffer buf) throws IOException {
       return ((ByteBufferReadable)in).read(buf);
     }
 
-    throw new UnsupportedOperationException("Byte-buffer read unsupported by input stream");
+    throw new UnsupportedOperationException("Byte-buffer read unsupported " +
+            "by " + in.getClass().getCanonicalName());
   }
 
   @Override
@@ -167,9 +170,8 @@ public void setReadahead(Long readahead)
     try {
       ((CanSetReadahead)in).setReadahead(readahead);
     } catch (ClassCastException e) {
-      throw new UnsupportedOperationException(
-          "this stream does not support setting the readahead " +
-          "caching strategy.");
+      throw new UnsupportedOperationException(in.getClass().getCanonicalName() +
+          " does not support setting the readahead caching strategy.");
     }
   }
 
@@ -246,4 +248,23 @@ public boolean hasCapability(String capability) {
   public String toString() {
     return super.toString() + ": " + in;
   }
+
+  @Override
+  public int read(long position, ByteBuffer buf) throws IOException {
+    if (in instanceof ByteBufferPositionedReadable) {
+      return ((ByteBufferPositionedReadable) in).read(position, buf);
+    }
+    throw new UnsupportedOperationException("Byte-buffer pread unsupported " +
+        "by " + in.getClass().getCanonicalName());
+  }
+
+  @Override
+  public void readFully(long position, ByteBuffer buf) throws IOException {
+    if (in instanceof ByteBufferPositionedReadable) {
+      ((ByteBufferPositionedReadable) in).readFully(position, buf);
+    } else {
+      throw new UnsupportedOperationException("Byte-buffer pread " +
+              "unsupported by " + in.getClass().getCanonicalName());
+    }
+  }
 }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StreamCapabilities.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StreamCapabilities.java
index 3549cdc4fa392..9d4b6fe7bc2ae 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StreamCapabilities.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StreamCapabilities.java
@@ -59,6 +59,17 @@ public interface StreamCapabilities {
    */
   String UNBUFFER = "in:unbuffer";
 
+  /**
+   * Stream read(ByteBuffer) capability implemented by
+   * {@link ByteBufferReadable#read(java.nio.ByteBuffer)}.
+   */
+  String READBYTEBUFFER = "in:readbytebuffer";
+  /**
+   * Stream read(long, ByteBuffer) capability implemented by
+   * {@link ByteBufferPositionedReadable#read(long, java.nio.ByteBuffer)}.
+   */
+  String PREADBYTEBUFFER = "in:preadbytebuffer";
+
   /**
    * Capabilities that a stream can support and be queried for.
    */
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsMapping.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsMapping.c
index 402ffd5bb20a6..b463679fcdb6f 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsMapping.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsMapping.c
@@ -199,8 +199,5 @@ Java_org_apache_hadoop_security_JniBasedUnixGroupsMapping_getGroupsForUser
   if (ginfo) {
     hadoop_group_info_free(ginfo);
   }
-  if (jgroupname) {
-    (*env)->DeleteLocalRef(env, jgroupname);
-  }
   return jgroups;
 }
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/CryptoStreamsTestBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/CryptoStreamsTestBase.java
index a0eb105833809..64bb966b15b0f 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/CryptoStreamsTestBase.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/CryptoStreamsTestBase.java
@@ -26,6 +26,7 @@
 import java.util.EnumSet;
 import java.util.Random;
 
+import org.apache.hadoop.fs.ByteBufferPositionedReadable;
 import org.apache.hadoop.fs.ByteBufferReadable;
 import org.apache.hadoop.fs.CanUnbuffer;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -129,6 +130,32 @@ private void preadCheck(PositionedReadable in) throws Exception {
     Assert.assertArrayEquals(result, expectedData);
   }
 
+  private int byteBufferPreadAll(ByteBufferPositionedReadable in,
+                                 ByteBuffer buf) throws IOException {
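+    // pread at increasing positions, starting from position 0, until the
+    // buffer is full or the stream returns -1 (EOF).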
+    int n = 0;
+    int total = 0;
+    while (n != -1) {
+      total += n;
+      if (!buf.hasRemaining()) {
+        break;
+      }
+      n = in.read(total, buf);
+    }
+
+    return total;
+  }
+
+  private void byteBufferPreadCheck(ByteBufferPositionedReadable in)
+          throws Exception {
+    ByteBuffer result = ByteBuffer.allocate(dataLen);
+    int n = byteBufferPreadAll(in, result);
+
+    Assert.assertEquals(dataLen, n);
+    ByteBuffer expectedData = ByteBuffer.allocate(n);
+    expectedData.put(data, 0, n);
+    Assert.assertArrayEquals(result.array(), expectedData.array());
+  }
+
   protected OutputStream getOutputStream(int bufferSize) throws IOException {
     return getOutputStream(bufferSize, key, iv);
   }
@@ -288,20 +315,36 @@ private int readAll(InputStream in, long pos, byte[] b, int off, int len)
     
     return total;
   }
+
+  private int readAll(InputStream in, long pos, ByteBuffer buf)
+      throws IOException {
+    int n = 0;
+    int total = 0;
+    while (n != -1) {
+      total += n;
+      if (!buf.hasRemaining()) {
+        break;
+      }
+      n = ((ByteBufferPositionedReadable) in).read(pos + total, buf);
+    }
+
+    return total;
+  }
   
   /** Test positioned read. */
   @Test(timeout=120000)
   public void testPositionedRead() throws Exception {
-    OutputStream out = getOutputStream(defaultBufferSize);
-    writeData(out);
+    try (OutputStream out = getOutputStream(defaultBufferSize)) {
+      writeData(out);
+    }
     
-    InputStream in = getInputStream(defaultBufferSize);
-    // Pos: 1/3 dataLen
-    positionedReadCheck(in , dataLen / 3);
+    try (InputStream in = getInputStream(defaultBufferSize)) {
+      // Pos: 1/3 dataLen
+      positionedReadCheck(in, dataLen / 3);
 
-    // Pos: 1/2 dataLen
-    positionedReadCheck(in, dataLen / 2);
-    in.close();
+      // Pos: 1/2 dataLen
+      positionedReadCheck(in, dataLen / 2);
+    }
   }
   
   private void positionedReadCheck(InputStream in, int pos) throws Exception {
@@ -315,43 +358,71 @@ private void positionedReadCheck(InputStream in, int pos) throws Exception {
     System.arraycopy(data, pos, expectedData, 0, n);
     Assert.assertArrayEquals(readData, expectedData);
   }
+
+  /** Test positioned read with ByteBuffers. */
+  @Test(timeout=120000)
+  public void testPositionedReadWithByteBuffer() throws Exception {
+    try (OutputStream out = getOutputStream(defaultBufferSize)) {
+      writeData(out);
+    }
+
+    try (InputStream in = getInputStream(defaultBufferSize)) {
+      // Pos: 1/3 dataLen
+      positionedReadCheckWithByteBuffer(in, dataLen / 3);
+
+      // Pos: 1/2 dataLen
+      positionedReadCheckWithByteBuffer(in, dataLen / 2);
+    }
+  }
+
+  private void positionedReadCheckWithByteBuffer(InputStream in, int pos)
+          throws Exception {
+    ByteBuffer result = ByteBuffer.allocate(dataLen);
+    int n = readAll(in, pos, result);
+
+    Assert.assertEquals(dataLen, n + pos);
+    byte[] readData = new byte[n];
+    System.arraycopy(result.array(), 0, readData, 0, n);
+    byte[] expectedData = new byte[n];
+    System.arraycopy(data, pos, expectedData, 0, n);
+    Assert.assertArrayEquals(readData, expectedData);
+  }
   
-  /** Test read fully */
+  /** Test read fully. */
   @Test(timeout=120000)
   public void testReadFully() throws Exception {
     OutputStream out = getOutputStream(defaultBufferSize);
     writeData(out);
     
-    InputStream in = getInputStream(defaultBufferSize);
-    final int len1 = dataLen / 4;
-    // Read len1 bytes
-    byte[] readData = new byte[len1];
-    readAll(in, readData, 0, len1);
-    byte[] expectedData = new byte[len1];
-    System.arraycopy(data, 0, expectedData, 0, len1);
-    Assert.assertArrayEquals(readData, expectedData);
-    
-    // Pos: 1/3 dataLen
-    readFullyCheck(in, dataLen / 3);
-    
-    // Read len1 bytes
-    readData = new byte[len1];
-    readAll(in, readData, 0, len1);
-    expectedData = new byte[len1];
-    System.arraycopy(data, len1, expectedData, 0, len1);
-    Assert.assertArrayEquals(readData, expectedData);
-    
-    // Pos: 1/2 dataLen
-    readFullyCheck(in, dataLen / 2);
-    
-    // Read len1 bytes
-    readData = new byte[len1];
-    readAll(in, readData, 0, len1);
-    expectedData = new byte[len1];
-    System.arraycopy(data, 2 * len1, expectedData, 0, len1);
-    Assert.assertArrayEquals(readData, expectedData);
-    
-    in.close();
+    try (InputStream in = getInputStream(defaultBufferSize)) {
+      final int len1 = dataLen / 4;
+      // Read len1 bytes
+      byte[] readData = new byte[len1];
+      readAll(in, readData, 0, len1);
+      byte[] expectedData = new byte[len1];
+      System.arraycopy(data, 0, expectedData, 0, len1);
+      Assert.assertArrayEquals(readData, expectedData);
+
+      // Pos: 1/3 dataLen
+      readFullyCheck(in, dataLen / 3);
+
+      // Read len1 bytes
+      readData = new byte[len1];
+      readAll(in, readData, 0, len1);
+      expectedData = new byte[len1];
+      System.arraycopy(data, len1, expectedData, 0, len1);
+      Assert.assertArrayEquals(readData, expectedData);
+
+      // Pos: 1/2 dataLen
+      readFullyCheck(in, dataLen / 2);
+
+      // Read len1 bytes
+      readData = new byte[len1];
+      readAll(in, readData, 0, len1);
+      expectedData = new byte[len1];
+      System.arraycopy(data, 2 * len1, expectedData, 0, len1);
+      Assert.assertArrayEquals(readData, expectedData);
+    }
   }
   
   private void readFullyCheck(InputStream in, int pos) throws Exception {
@@ -369,6 +440,60 @@ private void readFullyCheck(InputStream in, int pos) throws Exception {
     } catch (EOFException e) {
     }
   }
+
+  /** Test byte buffer read fully. */
+  @Test(timeout=120000)
+  public void testByteBufferReadFully() throws Exception {
+    try (OutputStream out = getOutputStream(defaultBufferSize)) {
+      writeData(out);
+    }
+
+    try (InputStream in = getInputStream(defaultBufferSize)) {
+      final int len1 = dataLen / 4;
+      // Read len1 bytes
+      byte[] readData = new byte[len1];
+      readAll(in, readData, 0, len1);
+      byte[] expectedData = new byte[len1];
+      System.arraycopy(data, 0, expectedData, 0, len1);
+      Assert.assertArrayEquals(readData, expectedData);
+
+      // Pos: 1/3 dataLen
+      byteBufferReadFullyCheck(in, dataLen / 3);
+
+      // Read len1 bytes
+      readData = new byte[len1];
+      readAll(in, readData, 0, len1);
+      expectedData = new byte[len1];
+      System.arraycopy(data, len1, expectedData, 0, len1);
+      Assert.assertArrayEquals(readData, expectedData);
+
+      // Pos: 1/2 dataLen
+      byteBufferReadFullyCheck(in, dataLen / 2);
+
+      // Read len1 bytes
+      readData = new byte[len1];
+      readAll(in, readData, 0, len1);
+      expectedData = new byte[len1];
+      System.arraycopy(data, 2 * len1, expectedData, 0, len1);
+      Assert.assertArrayEquals(readData, expectedData);
+    }
+  }
+
+  private void byteBufferReadFullyCheck(InputStream in, int pos)
+          throws Exception {
+    ByteBuffer result = ByteBuffer.allocate(dataLen - pos);
+    ((ByteBufferPositionedReadable) in).readFully(pos, result);
+
+    byte[] expectedData = new byte[dataLen - pos];
+    System.arraycopy(data, pos, expectedData, 0, dataLen - pos);
+    Assert.assertArrayEquals(result.array(), expectedData);
+
+    result = ByteBuffer.allocate(dataLen); // Exceeds maximum length
+    try {
+      ((ByteBufferPositionedReadable) in).readFully(pos, result);
+      Assert.fail("Read fully exceeding maximum length should fail.");
+    } catch (EOFException e) {
+    }
+  }
   
   /** Test seek to different position. */
   @Test(timeout=120000)
@@ -505,12 +630,40 @@ private void byteBufferReadCheck(InputStream in, ByteBuffer buf,
     System.arraycopy(data, 0, expectedData, 0, n);
     Assert.assertArrayEquals(readData, expectedData);
   }
+
+  private void byteBufferPreadCheck(InputStream in, ByteBuffer buf,
+      int bufPos) throws Exception {
+    // Test reading from position 0
+    buf.position(bufPos);
+    int n = ((ByteBufferPositionedReadable) in).read(0, buf);
+    Assert.assertEquals(bufPos + n, buf.position());
+    byte[] readData = new byte[n];
+    buf.rewind();
+    buf.position(bufPos);
+    buf.get(readData);
+    byte[] expectedData = new byte[n];
+    System.arraycopy(data, 0, expectedData, 0, n);
+    Assert.assertArrayEquals(readData, expectedData);
+
+    // Test reading from half way through the data
+    buf.position(bufPos);
+    n = ((ByteBufferPositionedReadable) in).read(dataLen / 2, buf);
+    Assert.assertEquals(bufPos + n, buf.position());
+    readData = new byte[n];
+    buf.rewind();
+    buf.position(bufPos);
+    buf.get(readData);
+    expectedData = new byte[n];
+    System.arraycopy(data, dataLen / 2, expectedData, 0, n);
+    Assert.assertArrayEquals(readData, expectedData);
+  }
   
   /** Test byte buffer read with different buffer size. */
   @Test(timeout=120000)
   public void testByteBufferRead() throws Exception {
-    OutputStream out = getOutputStream(defaultBufferSize);
-    writeData(out);
+    try (OutputStream out = getOutputStream(defaultBufferSize)) {
+      writeData(out);
+    }
     
     // Default buffer size, initial buffer position is 0
     InputStream in = getInputStream(defaultBufferSize);
@@ -560,6 +713,53 @@ public void testByteBufferRead() throws Exception {
     byteBufferReadCheck(in, buf, 11);
     in.close();
   }
+
+  /** Test byte buffer pread with different buffer size. */
+  @Test(timeout=120000)
+  public void testByteBufferPread() throws Exception {
+    try (OutputStream out = getOutputStream(defaultBufferSize)) {
+      writeData(out);
+    }
+
+    try (InputStream defaultBuf = getInputStream(defaultBufferSize);
+         InputStream smallBuf = getInputStream(smallBufferSize)) {
+
+      ByteBuffer buf = ByteBuffer.allocate(dataLen + 100);
+
+      // Default buffer size, initial buffer position is 0
+      byteBufferPreadCheck(defaultBuf, buf, 0);
+
+      // Default buffer size, initial buffer position is not 0
+      buf.clear();
+      byteBufferPreadCheck(defaultBuf, buf, 11);
+
+      // Small buffer size, initial buffer position is 0
+      buf.clear();
+      byteBufferPreadCheck(smallBuf, buf, 0);
+
+      // Small buffer size, initial buffer position is not 0
+      buf.clear();
+      byteBufferPreadCheck(smallBuf, buf, 11);
+
+      // Test with direct ByteBuffer
+      buf = ByteBuffer.allocateDirect(dataLen + 100);
+
+      // Direct buffer, default buffer size, initial buffer position is 0
+      byteBufferPreadCheck(defaultBuf, buf, 0);
+
+      // Direct buffer, default buffer size, initial buffer position is not 0
+      buf.clear();
+      byteBufferPreadCheck(defaultBuf, buf, 11);
+
+      // Direct buffer, small buffer size, initial buffer position is 0
+      buf.clear();
+      byteBufferPreadCheck(smallBuf, buf, 0);
+
+      // Direct buffer, small buffer size, initial buffer position is not 0
+      buf.clear();
+      byteBufferPreadCheck(smallBuf, buf, 11);
+    }
+  }
   
   @Test(timeout=120000)
   public void testCombinedOp() throws Exception {
@@ -797,5 +997,23 @@ public void testUnbuffer() throws Exception {
         // The close will be called when exiting this try-with-resource block
       }
     }
+
+    // Test ByteBuffer pread
+    try (InputStream in = getInputStream(smallBufferSize)) {
+      if (in instanceof ByteBufferPositionedReadable) {
+        ByteBufferPositionedReadable bbpin = (ByteBufferPositionedReadable) in;
+
+        // Test unbuffer after pread
+        byteBufferPreadCheck(bbpin);
+        ((CanUnbuffer) in).unbuffer();
+
+        // Test pread again after unbuffer
+        byteBufferPreadCheck(bbpin);
+
+        // Test close after unbuffer
+        ((CanUnbuffer) in).unbuffer();
+        // The close will be called when exiting this try-with-resource block
+      }
+    }
   }
 }
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreams.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreams.java
index cd7391a02c38f..73c6249612387 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreams.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreams.java
@@ -26,6 +26,7 @@
 import java.util.EnumSet;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ByteBufferPositionedReadable;
 import org.apache.hadoop.fs.ByteBufferReadable;
 import org.apache.hadoop.fs.CanSetDropBehind;
 import org.apache.hadoop.fs.CanSetReadahead;
@@ -180,7 +181,7 @@ static class FakeInputStream extends InputStream
       implements Seekable, PositionedReadable, ByteBufferReadable,
                  HasFileDescriptor, CanSetDropBehind, CanSetReadahead,
                  HasEnhancedByteBufferAccess, CanUnbuffer,
-                 StreamCapabilities {
+                 StreamCapabilities, ByteBufferPositionedReadable {
     private final byte[] oneByteBuf = new byte[1];
     private int pos = 0;
     private final byte[] data;
@@ -303,6 +304,56 @@ public int read(long position, byte[] b, int off, int len)
       return -1;
     }
 
+    @Override
+    public int read(long position, ByteBuffer buf) throws IOException {
+      if (buf == null) {
+        throw new NullPointerException();
+      } else if (!buf.hasRemaining()) {
+        return 0;
+      }
+
+      if (position > length) {
+        throw new IOException("Cannot read after EOF.");
+      }
+      if (position < 0) {
+        throw new IOException("Cannot read to negative offset.");
+      }
+
+      checkStream();
+
+      if (position < length) {
+        int n = (int) Math.min(buf.remaining(), length - position);
+        buf.put(data, (int) position, n);
+        return n;
+      }
+
+      return -1;
+    }
+
+    @Override
+    public void readFully(long position, ByteBuffer buf) throws IOException {
+      if (buf == null) {
+        throw new NullPointerException();
+      } else if (!buf.hasRemaining()) {
+        return;
+      }
+
+      if (position > length) {
+        throw new IOException("Cannot read after EOF.");
+      }
+      if (position < 0) {
+        throw new IOException("Cannot read to negative offset.");
+      }
+
+      checkStream();
+
+      if (position + buf.remaining() > length) {
+        throw new EOFException("Reached the end of stream.");
+      }
+
+      buf.put(data, (int) position, buf.remaining());
+    }
+
     @Override
     public void readFully(long position, byte[] b, int off, int len)
         throws IOException {
@@ -378,6 +429,8 @@ public boolean hasCapability(String capability) {
       case StreamCapabilities.READAHEAD:
       case StreamCapabilities.DROPBEHIND:
       case StreamCapabilities.UNBUFFER:
+      case StreamCapabilities.READBYTEBUFFER:
+      case StreamCapabilities.PREADBYTEBUFFER:
         return true;
       default:
         return false;
@@ -439,7 +492,9 @@ public void testHasCapability() throws Exception {
         new String[] {
             StreamCapabilities.DROPBEHIND,
             StreamCapabilities.READAHEAD,
-            StreamCapabilities.UNBUFFER
+            StreamCapabilities.UNBUFFER,
+            StreamCapabilities.READBYTEBUFFER,
+            StreamCapabilities.PREADBYTEBUFFER
         },
         new String[] {
             StreamCapabilities.HFLUSH,
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsForLocalFS.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsForLocalFS.java
index bb3fd7a68d722..8453889b53a5a 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsForLocalFS.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsForLocalFS.java
@@ -90,11 +90,26 @@ protected InputStream getInputStream(int bufferSize, byte[] key, byte[] iv)
   @Override
   @Test(timeout=10000)
   public void testByteBufferRead() throws Exception {}
+
+  @Ignore("Wrapped stream doesn't support ByteBufferPositionedReadable")
+  @Override
+  @Test(timeout=10000)
+  public void testPositionedReadWithByteBuffer() throws IOException {}
+
+  @Ignore("Wrapped stream doesn't support ByteBufferPositionedReadable")
+  @Override
+  @Test(timeout=10000)
+  public void testByteBufferReadFully() throws Exception {}
   
   @Ignore("ChecksumFSOutputSummer doesn't support Syncable")
   @Override
   @Test(timeout=10000)
   public void testSyncable() throws IOException {}
+
+  @Ignore("Wrapped stream doesn't support ByteBufferPositionedReadable")
+  @Override
+  @Test(timeout=10000)
+  public void testByteBufferPread() throws IOException {}
   
   @Ignore("ChecksumFSInputChecker doesn't support ByteBuffer read")
   @Override
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsNormal.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsNormal.java
index 7e300777a37a1..3114ca18325a7 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsNormal.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsNormal.java
@@ -91,31 +91,46 @@ public void testSyncable() throws IOException {}
   @Test(timeout=10000)
   public void testPositionedRead() throws IOException {}
 
+  @Ignore("Wrapped stream doesn't support ByteBufferPositionedReadable")
+  @Override
+  @Test(timeout=10000)
+  public void testPositionedReadWithByteBuffer() throws IOException {}
+
+  @Ignore("Wrapped stream doesn't support ByteBufferPositionedReadable")
+  @Override
+  @Test(timeout=10000)
+  public void testByteBufferReadFully() throws Exception {}
+
   @Ignore("Wrapped stream doesn't support ReadFully")
   @Override
   @Test(timeout=10000)
   public void testReadFully() throws IOException {}
-  
+
   @Ignore("Wrapped stream doesn't support Seek")
   @Override
   @Test(timeout=10000)
   public void testSeek() throws IOException {}
-  
+
   @Ignore("Wrapped stream doesn't support ByteBufferRead")
   @Override
   @Test(timeout=10000)
   public void testByteBufferRead() throws IOException {}
-  
+
+  @Ignore("Wrapped stream doesn't support ByteBufferPositionedReadable")
+  @Override
+  @Test(timeout=10000)
+  public void testByteBufferPread() throws IOException {}
+
   @Ignore("Wrapped stream doesn't support ByteBufferRead, Seek")
   @Override
   @Test(timeout=10000)
   public void testCombinedOp() throws IOException {}
-  
+
   @Ignore("Wrapped stream doesn't support SeekToNewSource")
   @Override
   @Test(timeout=10000)
   public void testSeekToNewSource() throws IOException {}
-  
+
   @Ignore("Wrapped stream doesn't support HasEnhancedByteBufferAccess")
   @Override
   @Test(timeout=10000)
@@ -125,4 +140,4 @@ public void testHasEnhancedByteBufferAccess() throws IOException {}
   @Override
   @Test
   public void testUnbuffer() throws Exception {}
-}
+}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index a4bf4542d04d4..8375ffd9d5aaf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -46,12 +46,14 @@
 
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.fs.ByteBufferPositionedReadable;
 import org.apache.hadoop.fs.ByteBufferReadable;
 import org.apache.hadoop.fs.ByteBufferUtil;
 import org.apache.hadoop.fs.CanSetDropBehind;
 import org.apache.hadoop.fs.CanSetReadahead;
 import org.apache.hadoop.fs.CanUnbuffer;
 import org.apache.hadoop.fs.ChecksumException;
+import org.apache.hadoop.fs.FSExceptionMessages;
 import org.apache.hadoop.fs.FSInputStream;
 import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.fs.HasEnhancedByteBufferAccess;
@@ -100,7 +102,8 @@
 @InterfaceAudience.Private
 public class DFSInputStream extends FSInputStream
     implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
-               HasEnhancedByteBufferAccess, CanUnbuffer, StreamCapabilities {
+               HasEnhancedByteBufferAccess, CanUnbuffer, StreamCapabilities,
+               ByteBufferPositionedReadable {
   @VisibleForTesting
   public static boolean tcpReadsDisabledForTesting = false;
   private long hedgedReadOpsLoopNumForTesting = 0;
@@ -318,8 +321,7 @@ private long fetchLocatedBlocksAndGetLastBlockLength(boolean refresh)
     }
 
     if (locatedBlocks != null) {
-      Iterator<LocatedBlock> oldIter =
-          locatedBlocks.getLocatedBlocks().iterator();
+      Iterator<LocatedBlock> oldIter = locatedBlocks.getLocatedBlocks().iterator();
       Iterator<LocatedBlock> newIter = newInfo.getLocatedBlocks().iterator();
       while (oldIter.hasNext() && newIter.hasNext()) {
         if (!oldIter.next().getBlock().equals(newIter.next().getBlock())) {
@@ -642,7 +644,6 @@ private synchronized DatanodeInfo blockSeekTo(long target)
       //
       // Compute desired block
       //
-
       LocatedBlock targetBlock = getBlockAt(target);
 
       // update current position
@@ -1653,6 +1654,27 @@ public void reset() throws IOException {
     throw new IOException("Mark/reset not supported");
   }
 
+  @Override
+  public int read(long position, final ByteBuffer buf) throws IOException {
+    if (!buf.hasRemaining()) {
+      return 0;
+    }
+    return pread(position, buf);
+  }
+
+  @Override
+  public void readFully(long position, final ByteBuffer buf)
+      throws IOException {
+    int nread = 0;
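+    // Loop until buf is full or EOF: each read advances buf.position() by
+    // the number of bytes read, so the file offset must be advanced by the
+    // running total as well.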
+    while (buf.hasRemaining()) {
+      int nbytes = read(position + nread, buf);
+      if (nbytes < 0) {
+        throw new EOFException(FSExceptionMessages.EOF_IN_READ_FULLY);
+      }
+      nread += nbytes;
+    }
+  }
+
   /** Utility class to encapsulate data node info and its address. */
   static final class DNAddrPair {
     final DatanodeInfo info;
@@ -1871,6 +1893,7 @@ public boolean hasCapability(String capability) {
     case StreamCapabilities.READAHEAD:
     case StreamCapabilities.DROPBEHIND:
     case StreamCapabilities.UNBUFFER:
+    case StreamCapabilities.READBYTEBUFFER:
+    case StreamCapabilities.PREADBYTEBUFFER:
       return true;
     default:
       return false;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt
index 18396c7855477..9b9ee6d962428 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt
@@ -58,21 +58,11 @@ if(WIN32)
     # Omit unneeded headers.
     set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DWIN32_LEAN_AND_MEAN")
     set(OS_DIR ${CMAKE_SOURCE_DIR}/main/native/libhdfs/os/windows)
-
-    # IMPORTANT: OUT_DIR MUST be relative to maven's
-    # project.build.directory (=target) and match dist-copynativelibs
-    # in order to be in a release
-    set(OUT_DIR bin)
+    set(OUT_DIR target/bin)
 else()
     set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=hidden")
-    # using old default behavior on GCC >= 10.0
-    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fcommon")
     set(OS_DIR ${CMAKE_SOURCE_DIR}/main/native/libhdfs/os/posix)
-
-    # IMPORTANT: OUT_DIR MUST be relative to maven's
-    # project.build.directory (=target) and match dist-copynativelibs
-    # in order to be in a release
-    set(OUT_DIR native/target/usr/local/lib)
+    set(OUT_DIR target/usr/local/lib)
 endif()
 
 # Configure JNI.
@@ -148,12 +138,12 @@ endif()
 
 add_subdirectory(main/native/libhdfs)
 add_subdirectory(main/native/libhdfs-tests)
+add_subdirectory(main/native/libhdfs-examples)
 
 # Temporary fix to disable Libhdfs++ build on older systems that do not support thread_local
 include(CheckCXXSourceCompiles)
 unset (THREAD_LOCAL_SUPPORTED CACHE)
-set (CMAKE_CXX_STANDARD 11)
-set (CMAKE_CXX_STANDARD_REQUIRED ON)
+set (CMAKE_REQUIRED_DEFINITIONS "-std=c++11")
 set (CMAKE_REQUIRED_LIBRARIES ${CMAKE_THREAD_LIBS_INIT})
 check_cxx_source_compiles(
     "#include 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-examples/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-examples/CMakeLists.txt
new file mode 100644
index 0000000000000..1d33639f3db68
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-examples/CMakeLists.txt
@@ -0,0 +1,34 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+cmake_minimum_required(VERSION 3.1 FATAL_ERROR)
+
+include_directories(
+    ${CMAKE_CURRENT_SOURCE_DIR}/../libhdfs/include
+    ${GENERATED_JAVAH}
+    ${CMAKE_BINARY_DIR}
+    ${CMAKE_CURRENT_SOURCE_DIR}/../libhdfs
+    ${JNI_INCLUDE_DIRS}
+    ${OS_DIR}
+)
+
+add_executable(hdfs_read libhdfs_read.c)
+target_link_libraries(hdfs_read hdfs)
+
+add_executable(hdfs_write libhdfs_write.c)
+target_link_libraries(hdfs_write hdfs)
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-examples/README.md b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-examples/README.md
new file mode 100644
index 0000000000000..c962feba526c7
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-examples/README.md
@@ -0,0 +1,24 @@
+<!---
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+The files in this directory are meant to provide additional examples of how to use libhdfs. They are compiled as
+part of the build and are thus guaranteed to compile against the associated version of libhdfs. However, no tests exist
+for these examples, so their functionality is not guaranteed.
+
+The examples are written to run against a mini-dfs cluster. The script `test-libhdfs.sh` can set up a mini DFS cluster
+that the examples can run against. Again, none of this is tested and is thus not guaranteed to work.
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_read.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-examples/libhdfs_read.c
similarity index 91%
rename from hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_read.c
rename to hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-examples/libhdfs_read.c
index 4b90f2a4ab0be..419be1268b284 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_read.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-examples/libhdfs_read.c
@@ -16,11 +16,16 @@
  * limitations under the License.
  */
 
-#include "hdfs/hdfs.h" 
+#include "hdfs/hdfs.h"
 
 #include <stdio.h>
 #include <stdlib.h>
 
+/**
+ * An example of using libhdfs to read files. The usage of this program is as follows:
+ *
+ *   Usage: hdfs_read <filename> <filesize> <buffersize>
+ */
 int main(int argc, char **argv) {
     hdfsFS fs;
     const char *rfile = argv[1];
@@ -33,12 +38,12 @@ int main(int argc, char **argv) {
         fprintf(stderr, "Usage: hdfs_read <filename> <filesize> <buffersize>\n");
         exit(-1);
     }
-    
+
     fs = hdfsConnect("default", 0);
     if (!fs) {
         fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
         exit(-1);
-    } 
+    }
 
     readFile = hdfsOpenFile(fs, rfile, O_RDONLY, bufferSize, 0, 0);
     if (!readFile) {
@@ -51,13 +56,13 @@ int main(int argc, char **argv) {
     if(buffer == NULL) {
         return -2;
     }
-    
+
     // read from the file
     curSize = bufferSize;
     for (; curSize == bufferSize;) {
         curSize = hdfsRead(fs, readFile, (void*)buffer, curSize);
     }
-    
+
 
     free(buffer);
     hdfsCloseFile(fs, readFile);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_write.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-examples/libhdfs_write.c
similarity index 93%
rename from hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_write.c
rename to hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-examples/libhdfs_write.c
index c55c8e330c33b..8fbf87e524439 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_write.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-examples/libhdfs_write.c
@@ -16,13 +16,18 @@
  * limitations under the License.
  */
 
-#include "hdfs/hdfs.h" 
+#include "hdfs/hdfs.h"
 
 #include <errno.h>
 #include <limits.h>
 #include <stdio.h>
 #include <stdlib.h>
 
+/**
+ * An example of using libhdfs to write files. The usage of this program is as follows:
+ *
+ *   Usage: hdfs_write <filename> <filesize> <buffersize>
+ */
 int main(int argc, char **argv) {
     hdfsFS fs;
     const char *writeFileName = argv[1];
@@ -40,12 +45,12 @@ int main(int argc, char **argv) {
         fprintf(stderr, "Usage: hdfs_write <filename> <filesize> <buffersize>\n");
         exit(-1);
     }
-    
+
     fs = hdfsConnect("default", 0);
     if (!fs) {
         fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
         exit(-1);
-    } 
+    }
 
     // sanity check
     if(fileTotalSize == ULONG_MAX && errno == ERANGE) {
@@ -79,7 +84,7 @@ int main(int argc, char **argv) {
 
     // write to the file
     for (nrRemaining = fileTotalSize; nrRemaining > 0; nrRemaining -= bufferSize ) {
-      curSize = ( bufferSize < nrRemaining ) ? bufferSize : (tSize)nrRemaining; 
+      curSize = ( bufferSize < nrRemaining ) ? bufferSize : (tSize)nrRemaining;
       if ((written = hdfsWrite(fs, writeFile, (void*)buffer, curSize)) != curSize) {
         fprintf(stderr, "ERROR: hdfsWrite returned an error on write: %d\n", written);
         exit(-3);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/tests/test-libhdfs.sh b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-examples/test-libhdfs.sh
similarity index 98%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/native/tests/test-libhdfs.sh
rename to hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-examples/test-libhdfs.sh
index 3407e9cf8e26a..e43b0a52903dd 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/tests/test-libhdfs.sh
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-examples/test-libhdfs.sh
@@ -70,7 +70,7 @@ $HADOOP_HOME/share/hadoop/common/
 $HADOOP_HOME/share/hadoop/hdfs
 $HADOOP_HOME/share/hadoop/hdfs/lib/"
 
-for d in $JAR_DIRS; do 
+for d in $JAR_DIRS; do
   for j in $d/*.jar; do
     CLASSPATH=${CLASSPATH}:$j
   done;
@@ -114,14 +114,14 @@ LIB_JVM_DIR=`findlibjvm`
 echo  "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
 echo  LIB_JVM_DIR = $LIB_JVM_DIR
 echo  "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
-# Put delays to ensure hdfs is up and running and also shuts down 
+# Put delays to ensure hdfs is up and running and also shuts down
 # after the tests are complete
 rm $HDFS_TEST_CONF_DIR/core-site.xml
 
 $HADOOP_HOME/bin/hadoop jar $HDFS_TEST_JAR \
     org.apache.hadoop.test.MiniDFSClusterManager \
     -format -nnport 20300 -writeConfig $HDFS_TEST_CONF_DIR/core-site.xml \
-    > /tmp/libhdfs-test-cluster.out 2>&1 & 
+    > /tmp/libhdfs-test-cluster.out 2>&1 &
 
 MINI_CLUSTER_PID=$!
 for i in {1..15}; do
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/CMakeLists.txt
index 08fc030bbbbcc..f16cc9eb1b033 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/CMakeLists.txt
@@ -29,8 +29,8 @@ include_directories(
 
 add_library(native_mini_dfs
     native_mini_dfs.c
-    ../libhdfs/common/htable.c
     ../libhdfs/exception.c
+    ../libhdfs/jclasses.c
     ../libhdfs/jni_helper.c
     ${OS_DIR}/mutexes.c
     ${OS_DIR}/thread_local_storage.c
@@ -39,6 +39,3 @@ add_library(native_mini_dfs
 add_executable(test_native_mini_dfs test_native_mini_dfs.c)
 target_link_libraries(test_native_mini_dfs native_mini_dfs ${JAVA_JVM_LIBRARY})
 add_test(test_test_native_mini_dfs test_native_mini_dfs)
-
-add_executable(test_htable ../libhdfs/common/htable.c test_htable.c)
-target_link_libraries(test_htable ${OS_LINK_LIBRARIES})
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/hdfs_test.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/hdfs_test.h
index 0eab9a68aea7f..f00326317f24a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/hdfs_test.h
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/hdfs_test.h
@@ -49,6 +49,24 @@ extern  "C" {
      */
     void hdfsFileDisableDirectRead(struct hdfsFile_internal *file);
 
+    /**
+     * Determine if a file is using the "direct pread" optimization.
+     *
+     * @param file     The HDFS file
+     * @return         1 if the file is using the direct pread optimization,
+     *                 0 otherwise.
+     */
+    int hdfsFileUsesDirectPread(struct hdfsFile_internal *file);
+
+    /**
+     * Disable the direct pread optimization for a file.
+     *
+     * This is mainly provided for unit testing purposes.
+     *
+     * @param file     The HDFS file
+     */
+    void hdfsFileDisableDirectPread(struct hdfsFile_internal *file);
+
     /**
      * Disable domain socket security checks.
      *
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/native_mini_dfs.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/native_mini_dfs.c
index 6938109d53e4d..a69c6efe0c763 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/native_mini_dfs.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/native_mini_dfs.c
@@ -17,6 +17,7 @@
  */
 
 #include "exception.h"
+#include "jclasses.h"
 #include "jni_helper.h"
 #include "native_mini_dfs.h"
 #include "platform.h"
@@ -36,9 +37,7 @@
 
 #define MINIDFS_CLUSTER_BUILDER "org/apache/hadoop/hdfs/MiniDFSCluster$Builder"
 #define MINIDFS_CLUSTER "org/apache/hadoop/hdfs/MiniDFSCluster"
-#define HADOOP_CONF     "org/apache/hadoop/conf/Configuration"
 #define HADOOP_NAMENODE "org/apache/hadoop/hdfs/server/namenode/NameNode"
-#define JAVA_INETSOCKETADDRESS "java/net/InetSocketAddress"
 
 struct NativeMiniDfsCluster {
     /**
@@ -60,8 +59,7 @@ static int hdfsDisableDomainSocketSecurity(void)
       errno = EINTERNAL;
       return -1;
     }
-    jthr = invokeMethod(env, NULL, STATIC, NULL,
-            "org/apache/hadoop/net/unix/DomainSocket",
+    jthr = invokeMethod(env, NULL, STATIC, NULL, JC_DOMAIN_SOCKET,
             "disableBindPathValidation", "()V");
     if (jthr) {
         errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -126,11 +124,6 @@ struct NativeMiniDfsCluster* nmdCreate(struct NativeMiniDfsConf *conf)
             "nmdCreate: new Configuration");
         goto error;
     }
-    if (jthr) {
-        printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                              "nmdCreate: Configuration::setBoolean");
-        goto error;
-    }
     // Disable 'minimum block size' -- it's annoying in tests.
     (*env)->DeleteLocalRef(env, jconfStr);
     jconfStr = NULL;
@@ -140,8 +133,9 @@ struct NativeMiniDfsCluster* nmdCreate(struct NativeMiniDfsConf *conf)
                               "nmdCreate: new String");
         goto error;
     }
-    jthr = invokeMethod(env, NULL, INSTANCE, cobj, HADOOP_CONF,
-                        "setLong", "(Ljava/lang/String;J)V", jconfStr, 0LL);
+    jthr = invokeMethod(env, NULL, INSTANCE, cobj,
+            JC_CONFIGURATION, "setLong", "(Ljava/lang/String;J)V", jconfStr,
+            0LL);
     if (jthr) {
         printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
                               "nmdCreate: Configuration::setLong");
@@ -163,7 +157,7 @@ struct NativeMiniDfsCluster* nmdCreate(struct NativeMiniDfsConf *conf)
             goto error;
         }
     }
-    jthr = invokeMethod(env, &val, INSTANCE, bld, MINIDFS_CLUSTER_BUILDER,
+    jthr = findClassAndInvokeMethod(env, &val, INSTANCE, bld, MINIDFS_CLUSTER_BUILDER,
             "format", "(Z)L" MINIDFS_CLUSTER_BUILDER ";", conf->doFormat);
     if (jthr) {
         printExceptionAndFree(env, jthr, PRINT_EXC_ALL, "nmdCreate: "
@@ -172,7 +166,7 @@ struct NativeMiniDfsCluster* nmdCreate(struct NativeMiniDfsConf *conf)
     }
     (*env)->DeleteLocalRef(env, val.l);
     if (conf->webhdfsEnabled) {
-        jthr = invokeMethod(env, &val, INSTANCE, bld, MINIDFS_CLUSTER_BUILDER,
+        jthr = findClassAndInvokeMethod(env, &val, INSTANCE, bld, MINIDFS_CLUSTER_BUILDER,
                         "nameNodeHttpPort", "(I)L" MINIDFS_CLUSTER_BUILDER ";",
                         conf->namenodeHttpPort);
         if (jthr) {
@@ -183,16 +177,16 @@ struct NativeMiniDfsCluster* nmdCreate(struct NativeMiniDfsConf *conf)
         (*env)->DeleteLocalRef(env, val.l);
     }
     if (conf->numDataNodes) {
-        jthr = invokeMethod(env, &val, INSTANCE, bld, MINIDFS_CLUSTER_BUILDER,
+        jthr = findClassAndInvokeMethod(env, &val, INSTANCE, bld, MINIDFS_CLUSTER_BUILDER,
                 "numDataNodes", "(I)L" MINIDFS_CLUSTER_BUILDER ";", conf->numDataNodes);
         if (jthr) {
             printExceptionAndFree(env, jthr, PRINT_EXC_ALL, "nmdCreate: "
                                   "Builder::numDataNodes");
             goto error;
         }
+        (*env)->DeleteLocalRef(env, val.l);
     }
-    (*env)->DeleteLocalRef(env, val.l);
-    jthr = invokeMethod(env, &val, INSTANCE, bld, MINIDFS_CLUSTER_BUILDER,
+    jthr = findClassAndInvokeMethod(env, &val, INSTANCE, bld, MINIDFS_CLUSTER_BUILDER,
             "build", "()L" MINIDFS_CLUSTER ";");
     if (jthr) {
         printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -242,7 +236,7 @@ int nmdShutdown(struct NativeMiniDfsCluster* cl)
         fprintf(stderr, "nmdShutdown: getJNIEnv failed\n");
         return -EIO;
     }
-    jthr = invokeMethod(env, NULL, INSTANCE, cl->obj,
+    jthr = findClassAndInvokeMethod(env, NULL, INSTANCE, cl->obj,
             MINIDFS_CLUSTER, "shutdown", "()V");
     if (jthr) {
         printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -260,7 +254,7 @@ int nmdWaitClusterUp(struct NativeMiniDfsCluster *cl)
         fprintf(stderr, "nmdWaitClusterUp: getJNIEnv failed\n");
         return -EIO;
     }
-    jthr = invokeMethod(env, NULL, INSTANCE, cl->obj,
+    jthr = findClassAndInvokeMethod(env, NULL, INSTANCE, cl->obj,
             MINIDFS_CLUSTER, "waitClusterUp", "()V");
     if (jthr) {
         printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -282,7 +276,7 @@ int nmdGetNameNodePort(const struct NativeMiniDfsCluster *cl)
     }
     // Note: this will have to be updated when HA nativeMiniDfs clusters are
     // supported
-    jthr = invokeMethod(env, &jVal, INSTANCE, cl->obj,
+    jthr = findClassAndInvokeMethod(env, &jVal, INSTANCE, cl->obj,
             MINIDFS_CLUSTER, "getNameNodePort", "()I");
     if (jthr) {
         printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -307,7 +301,7 @@ int nmdGetNameNodeHttpAddress(const struct NativeMiniDfsCluster *cl,
         return -EIO;
     }
     // First get the (first) NameNode of the cluster
-    jthr = invokeMethod(env, &jVal, INSTANCE, cl->obj, MINIDFS_CLUSTER,
+    jthr = findClassAndInvokeMethod(env, &jVal, INSTANCE, cl->obj, MINIDFS_CLUSTER,
                         "getNameNode", "()L" HADOOP_NAMENODE ";");
     if (jthr) {
         printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -318,8 +312,8 @@ int nmdGetNameNodeHttpAddress(const struct NativeMiniDfsCluster *cl,
     jNameNode = jVal.l;
 
     // Then get the http address (InetSocketAddress) of the NameNode
-    jthr = invokeMethod(env, &jVal, INSTANCE, jNameNode, HADOOP_NAMENODE,
-                        "getHttpAddress", "()L" JAVA_INETSOCKETADDRESS ";");
+    jthr = findClassAndInvokeMethod(env, &jVal, INSTANCE, jNameNode, HADOOP_NAMENODE,
+                        "getHttpAddress", "()L" JAVA_NET_ISA ";");
     if (jthr) {
         ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
                                     "nmdGetNameNodeHttpAddress: "
@@ -328,8 +322,8 @@ int nmdGetNameNodeHttpAddress(const struct NativeMiniDfsCluster *cl,
     }
     jAddress = jVal.l;
 
-    jthr = invokeMethod(env, &jVal, INSTANCE, jAddress,
-                        JAVA_INETSOCKETADDRESS, "getPort", "()I");
+    jthr = findClassAndInvokeMethod(env, &jVal, INSTANCE, jAddress,
+                        JAVA_NET_ISA, "getPort", "()I");
     if (jthr) {
         ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
                                     "nmdGetNameNodeHttpAddress: "
@@ -338,7 +332,7 @@ int nmdGetNameNodeHttpAddress(const struct NativeMiniDfsCluster *cl,
     }
     *port = jVal.i;
 
-    jthr = invokeMethod(env, &jVal, INSTANCE, jAddress, JAVA_INETSOCKETADDRESS,
+    jthr = findClassAndInvokeMethod(env, &jVal, INSTANCE, jAddress, JAVA_NET_ISA,
                         "getHostName", "()Ljava/lang/String;");
     if (jthr) {
         ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
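This file also shows the convention the patch applies everywhere: classes that libhdfs uses on hot paths are addressed through the JC_* constants of the new jclasses cache via invokeMethod, while classes that only the native test harness touches, such as MiniDFSCluster and its Builder, move to findClassAndInvokeMethod, which still resolves the class by name on every call. The sketch below illustrates the caching side only; it is a guess at the shape of jclasses.c, which does not appear in this diff, and CachedJavaClass, cachedClasses, and cacheClass are illustrative names.

#include <jni.h>

typedef enum {
    JC_CONFIGURATION,
    JC_PATH,
    JC_FILE_SYSTEM,
    /* ... one entry per frequently used Hadoop class ... */
    NUM_CACHED_CLASSES
} CachedJavaClass;

static jclass cachedClasses[NUM_CACHED_CLASSES];

/* Resolve a class once and pin it with a global reference, so that later
 * invokeMethod(env, ..., JC_XXX, ...) calls never repeat the FindClass. */
static jthrowable cacheClass(JNIEnv *env, CachedJavaClass jc,
                             const char *name)
{
    jclass local = (*env)->FindClass(env, name);
    if (!local) {
        return (*env)->ExceptionOccurred(env);
    }
    cachedClasses[jc] = (*env)->NewGlobalRef(env, local);
    (*env)->DeleteLocalRef(env, local);
    if (!cachedClasses[jc]) {
        return (*env)->ExceptionOccurred(env);
    }
    return NULL;
}

The MiniDFSCluster classes presumably stay out of the table because the test harness calls them a handful of times per process, where an extra FindClass round trip is harmless.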
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_htable.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_htable.c
deleted file mode 100644
index 0c3861bfa7f9a..0000000000000
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_htable.c
+++ /dev/null
@@ -1,100 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "common/htable.h"
-#include "expect.h"
-#include "hdfs_test.h"
-
-#include <errno.h>
-#include <inttypes.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-// Disable type cast and loss of precision warnings, because the test
-// manipulates void* values manually on purpose.
-#ifdef WIN32
-#pragma warning(disable: 4244 4306)
-#endif
-
-static uint32_t simple_hash(const void *key, uint32_t size)
-{
-    uintptr_t k = (uintptr_t)key;
-    return ((13 + k) * 6367) % size;
-}
-
-static int simple_compare(const void *a, const void *b)
-{
-    return a == b;
-}
-
-static void expect_102(void *f, void *k, void *v)
-{
-    int *found_102 = f;
-    uintptr_t key = (uintptr_t)k;
-    uintptr_t val = (uintptr_t)v;
-
-    if ((key == 2) && (val == 102)) {
-        *found_102 = 1;
-    } else {
-        abort();
-    }
-}
-
-static void *htable_pop_val(struct htable *ht, void *key)
-{
-    void *old_key, *old_val;
-
-    htable_pop(ht, key, &old_key, &old_val);
-    return old_val;
-}
-
-int main(void)
-{
-    struct htable *ht;
-    int found_102 = 0;
-
-    ht = htable_alloc(4, simple_hash, simple_compare);
-    EXPECT_INT_EQ(0, htable_used(ht));
-    EXPECT_INT_EQ(4, htable_capacity(ht));
-    EXPECT_NULL(htable_get(ht, (void*)123));
-    EXPECT_NULL(htable_pop_val(ht, (void*)123));
-    EXPECT_ZERO(htable_put(ht, (void*)123, (void*)456));
-    EXPECT_INT_EQ(456, (uintptr_t)htable_get(ht, (void*)123));
-    EXPECT_INT_EQ(456, (uintptr_t)htable_pop_val(ht, (void*)123));
-    EXPECT_NULL(htable_pop_val(ht, (void*)123));
-
-    // Enlarge the hash table
-    EXPECT_ZERO(htable_put(ht, (void*)1, (void*)101));
-    EXPECT_ZERO(htable_put(ht, (void*)2, (void*)102));
-    EXPECT_ZERO(htable_put(ht, (void*)3, (void*)103));
-    EXPECT_INT_EQ(3, htable_used(ht));
-    EXPECT_INT_EQ(8, htable_capacity(ht));
-    EXPECT_INT_EQ(102, (uintptr_t)htable_get(ht, (void*)2));
-    EXPECT_INT_EQ(101, (uintptr_t)htable_pop_val(ht, (void*)1));
-    EXPECT_INT_EQ(103, (uintptr_t)htable_pop_val(ht, (void*)3));
-    EXPECT_INT_EQ(1, htable_used(ht));
-    htable_visit(ht, expect_102, &found_102);
-    EXPECT_INT_EQ(1, found_102);
-    htable_free(ht);
-
-    fprintf(stderr, "SUCCESS.\n");
-    return EXIT_SUCCESS;
-}
-
-// vim: ts=4:sw=4:tw=79:et
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_ops.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_ops.c
index d69aa37794848..dd3e122695669 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_ops.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_ops.c
@@ -16,8 +16,10 @@
  * limitations under the License.
  */
 
-#include "hdfs/hdfs.h" 
-#include "hdfs_test.h" 
+#include "expect.h"
+#include "hdfs/hdfs.h"
+#include "hdfs_test.h"
+#include "native_mini_dfs.h"
 #include "platform.h"
 
 #include 
@@ -59,7 +61,18 @@ void permission_disp(short permissions, char *rtr) {
       strncpy(rtr, perm, 3);
       rtr+=3;
     }
-} 
+}
+
+/**
+ * Shut down and free the given mini cluster, then exit with the provided exit_code. This function is meant to be
+ * called with a non-zero exit code, which is why we ignore the return status of MiniDFSCluster#shutdown: the
+ * process is going to fail anyway.
+ */
+void shutdown_and_exit(struct NativeMiniDfsCluster* cl, int exit_code) {
+    nmdShutdown(cl);
+    nmdFree(cl);
+    exit(exit_code);
+}
 
 int main(int argc, char **argv) {
     const char *writePath = "/tmp/testfile.txt";
@@ -75,9 +88,9 @@ int main(int argc, char **argv) {
     const char *userPath = "/tmp/usertestfile.txt";
 
     char buffer[32], buffer2[256], rdbuffer[32];
-    tSize num_written_bytes, num_read_bytes;
+    tSize num_written_bytes, num_read_bytes, num_pread_bytes;
     hdfsFS fs, lfs;
-    hdfsFile writeFile, readFile, localFile, appendFile, userFile;
+    hdfsFile writeFile, readFile, preadFile, localFile, appendFile, userFile;
     tOffset currentPos, seekPos;
     int exists, totalResult, result, numEntries, i, j;
     const char *resp;
@@ -88,16 +101,47 @@ int main(int argc, char **argv) {
     short newPerm = 0666;
     tTime newMtime, newAtime;
 
-    fs = hdfsConnectNewInstance("default", 0);
+    // Create and start the mini cluster
+    struct NativeMiniDfsCluster* cl;
+    struct NativeMiniDfsConf conf = {
+        1, /* doFormat */
+    };
+
+    cl = nmdCreate(&conf);
+    EXPECT_NONNULL(cl);
+    EXPECT_ZERO(nmdWaitClusterUp(cl));
+    tPort port;
+    port = (tPort) nmdGetNameNodePort(cl);
+
+    // Create a hdfs connection to the mini cluster
+    struct hdfsBuilder *bld;
+    bld = hdfsNewBuilder();
+    EXPECT_NONNULL(bld);
+
+    hdfsBuilderSetForceNewInstance(bld);
+    hdfsBuilderSetNameNode(bld, "localhost");
+    hdfsBuilderSetNameNodePort(bld, port);
+    // The HDFS append tests require setting this property; otherwise they fail with:
+    //
+    //     IOException: Failed to replace a bad datanode on the existing pipeline due to no more good datanodes being
+    //     available to try. The current failed datanode replacement policy is DEFAULT, and a client may configure this
+    //     via 'dfs.client.block.write.replace-datanode-on-failure.policy' in its configuration.
+    //
+    // It seems that, when operating against a mini DFS cluster, some HDFS append tests need this setting (for
+    // example, see TestFileAppend#testMultipleAppends).
+    hdfsBuilderConfSetStr(bld, "dfs.client.block.write.replace-datanode-on-failure.enable", "false");
+
+    fs = hdfsBuilderConnect(bld);
+
     if(!fs) {
         fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
-        exit(-1);
+        shutdown_and_exit(cl, -1);
     } 
  
     lfs = hdfsConnectNewInstance(NULL, 0);
     if(!lfs) {
         fprintf(stderr, "Oops! Failed to connect to 'local' hdfs!\n");
-        exit(-1);
+        shutdown_and_exit(cl, -1);
     } 
 
     {
@@ -106,7 +150,7 @@ int main(int argc, char **argv) {
         writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
         if(!writeFile) {
             fprintf(stderr, "Failed to open %s for writing!\n", writePath);
-            exit(-1);
+            shutdown_and_exit(cl, -1);
         }
         fprintf(stderr, "Opened %s for writing successfully...\n", writePath);
         num_written_bytes =
@@ -115,7 +159,7 @@ int main(int argc, char **argv) {
         if (num_written_bytes != strlen(fileContents) + 1) {
           fprintf(stderr, "Failed to write correct number of bytes - expected %d, got %d\n",
                   (int)(strlen(fileContents) + 1), (int)num_written_bytes);
-            exit(-1);
+            shutdown_and_exit(cl, -1);
         }
         fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
 
@@ -124,19 +168,19 @@ int main(int argc, char **argv) {
             fprintf(stderr, 
                     "Failed to get current file position correctly! Got %" PRId64 "!\n",
                     currentPos);
-            exit(-1);
+            shutdown_and_exit(cl, -1);
         }
         fprintf(stderr, "Current position: %" PRId64 "\n", currentPos);
 
         if (hdfsFlush(fs, writeFile)) {
             fprintf(stderr, "Failed to 'flush' %s\n", writePath); 
-            exit(-1);
+            shutdown_and_exit(cl, -1);
         }
         fprintf(stderr, "Flushed %s successfully!\n", writePath); 
 
         if (hdfsHFlush(fs, writeFile)) {
             fprintf(stderr, "Failed to 'hflush' %s\n", writePath);
-            exit(-1);
+            shutdown_and_exit(cl, -1);
         }
         fprintf(stderr, "HFlushed %s successfully!\n", writePath);
 
@@ -150,20 +194,20 @@ int main(int argc, char **argv) {
 
         if (exists) {
           fprintf(stderr, "Failed to validate existence of %s\n", readPath);
-          exit(-1);
+          shutdown_and_exit(cl, -1);
         }
 
         readFile = hdfsOpenFile(fs, readPath, O_RDONLY, 0, 0, 0);
         if (!readFile) {
             fprintf(stderr, "Failed to open %s for reading!\n", readPath);
-            exit(-1);
+            shutdown_and_exit(cl, -1);
         }
 
         if (!hdfsFileIsOpenForRead(readFile)) {
             fprintf(stderr, "hdfsFileIsOpenForRead: we just opened a file "
                     "with O_RDONLY, and it did not show up as 'open for "
                     "read'\n");
-            exit(-1);
+            shutdown_and_exit(cl, -1);
         }
 
         fprintf(stderr, "hdfsAvailable: %d\n", hdfsAvailable(fs, readFile));
@@ -171,7 +215,7 @@ int main(int argc, char **argv) {
         seekPos = 1;
         if(hdfsSeek(fs, readFile, seekPos)) {
             fprintf(stderr, "Failed to seek %s for reading!\n", readPath);
-            exit(-1);
+            shutdown_and_exit(cl, -1);
         }
 
         currentPos = -1;
@@ -179,14 +223,14 @@ int main(int argc, char **argv) {
             fprintf(stderr, 
                     "Failed to get current file position correctly! Got %" PRId64 "!\n",
                     currentPos);
-            exit(-1);
+            shutdown_and_exit(cl, -1);
         }
         fprintf(stderr, "Current position: %" PRId64 "\n", currentPos);
 
         if (!hdfsFileUsesDirectRead(readFile)) {
           fprintf(stderr, "Direct read support incorrectly not detected "
                   "for HDFS filesystem\n");
-          exit(-1);
+          shutdown_and_exit(cl, -1);
         }
 
         fprintf(stderr, "Direct read support detected for HDFS\n");
@@ -194,7 +238,7 @@ int main(int argc, char **argv) {
         // Test the direct read path
         if(hdfsSeek(fs, readFile, 0)) {
             fprintf(stderr, "Failed to seek %s for reading!\n", readPath);
-            exit(-1);
+            shutdown_and_exit(cl, -1);
         }
         memset(buffer, 0, sizeof(buffer));
         num_read_bytes = hdfsRead(fs, readFile, (void*)buffer,
@@ -202,30 +246,41 @@ int main(int argc, char **argv) {
         if (strncmp(fileContents, buffer, strlen(fileContents)) != 0) {
             fprintf(stderr, "Failed to read (direct). Expected %s but got %s (%d bytes)\n",
                     fileContents, buffer, num_read_bytes);
-            exit(-1);
+            shutdown_and_exit(cl, -1);
         }
         fprintf(stderr, "Read (direct) following %d bytes:\n%s\n",
                 num_read_bytes, buffer);
+        memset(buffer, 0, strlen(fileContents) + 1);
         if (hdfsSeek(fs, readFile, 0L)) {
             fprintf(stderr, "Failed to seek to file start!\n");
-            exit(-1);
+            shutdown_and_exit(cl, -1);
         }
 
         // Disable the direct read path so that we really go through the slow
         // read path
         hdfsFileDisableDirectRead(readFile);
 
-        num_read_bytes = hdfsRead(fs, readFile, (void*)buffer, 
-                sizeof(buffer));
-        fprintf(stderr, "Read following %d bytes:\n%s\n", 
-                num_read_bytes, buffer);
+        if (hdfsFileUsesDirectRead(readFile)) {
+            fprintf(stderr, "Disabled direct reads, but it is still enabled");
+            shutdown_and_exit(cl, -1);
+        }
 
-        memset(buffer, 0, strlen(fileContents + 1));
+        if (!hdfsFileUsesDirectPread(readFile)) {
+            fprintf(stderr, "Disabled direct reads, but direct preads was "
+                            "disabled as well");
+            shutdown_and_exit(cl, -1);
+        }
 
-        num_read_bytes = hdfsPread(fs, readFile, 0, (void*)buffer, 
+        num_read_bytes = hdfsRead(fs, readFile, (void*)buffer,
                 sizeof(buffer));
-        fprintf(stderr, "Read following %d bytes:\n%s\n", 
+        if (strncmp(fileContents, buffer, strlen(fileContents)) != 0) {
+            fprintf(stderr, "Failed to read. Expected %s but got %s (%d bytes)\n",
+                    fileContents, buffer, num_read_bytes);
+            shutdown_and_exit(cl, -1);
+        }
+        fprintf(stderr, "Read following %d bytes:\n%s\n",
                 num_read_bytes, buffer);
+        memset(buffer, 0, strlen(fileContents) + 1);
 
         hdfsCloseFile(fs, readFile);
 
@@ -233,7 +288,7 @@ int main(int argc, char **argv) {
         localFile = hdfsOpenFile(lfs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
         if(!localFile) {
             fprintf(stderr, "Failed to open %s for writing!\n", writePath);
-            exit(-1);
+            shutdown_and_exit(cl, -1);
         }
 
         num_written_bytes = hdfsWrite(lfs, localFile, (void*)fileContents,
@@ -245,12 +300,222 @@ int main(int argc, char **argv) {
         if (hdfsFileUsesDirectRead(localFile)) {
           fprintf(stderr, "Direct read support incorrectly detected for local "
                   "filesystem\n");
-          exit(-1);
+          shutdown_and_exit(cl, -1);
+        }
+
+        hdfsCloseFile(lfs, localFile);
+    }
+
+    {
+        // Pread tests
+
+        exists = hdfsExists(fs, readPath);
+
+        if (exists) {
+            fprintf(stderr, "Failed to validate existence of %s\n", readPath);
+            shutdown_and_exit(cl, -1);
+        }
+
+        preadFile = hdfsOpenFile(fs, readPath, O_RDONLY, 0, 0, 0);
+        if (!preadFile) {
+            fprintf(stderr, "Failed to open %s for reading!\n", readPath);
+            shutdown_and_exit(cl, -1);
+        }
+
+        if (!hdfsFileIsOpenForRead(preadFile)) {
+            fprintf(stderr, "hdfsFileIsOpenForRead: we just opened a file "
+                            "with O_RDONLY, and it did not show up as 'open for "
+                            "read'\n");
+            shutdown_and_exit(cl, -1);
+        }
+
+        fprintf(stderr, "hdfsAvailable: %d\n", hdfsAvailable(fs, preadFile));
+
+        num_pread_bytes = hdfsPread(fs, preadFile, 0, (void*)buffer, sizeof(buffer));
+        if (strncmp(fileContents, buffer, strlen(fileContents)) != 0) {
+            fprintf(stderr, "Failed to pread (direct). Expected %s but got %s (%d bytes)\n",
+                    fileContents, buffer, num_pread_bytes);
+            shutdown_and_exit(cl, -1);
+        }
+        fprintf(stderr, "Pread (direct) following %d bytes:\n%s\n",
+                num_pread_bytes, buffer);
+        memset(buffer, 0, strlen(fileContents) + 1);
+        if (hdfsTell(fs, preadFile) != 0) {
+            fprintf(stderr, "Pread changed position of file\n");
+            shutdown_and_exit(cl, -1);
+        }
+
+        // Test pread midway through the file rather than at the beginning
+        const char *fileContentsChunk = "World!";
+        num_pread_bytes = hdfsPread(fs, preadFile, 7, (void*)buffer, sizeof(buffer));
+        if (strncmp(fileContentsChunk, buffer, strlen(fileContentsChunk)) != 0) {
+            fprintf(stderr, "Failed to pread (direct). Expected %s but got %s (%d bytes)\n",
+                    fileContentsChunk, buffer, num_pread_bytes);
+            shutdown_and_exit(cl, -1);
+        }
+        fprintf(stderr, "Pread (direct) following %d bytes:\n%s\n", num_pread_bytes, buffer);
+        memset(buffer, 0, strlen(fileContents) + 1);
+        if (hdfsTell(fs, preadFile) != 0) {
+            fprintf(stderr, "Pread changed position of file\n");
+            shutdown_and_exit(cl, -1);
+        }
+
+        // hdfsPreadFully (direct) test
+        if (hdfsPreadFully(fs, preadFile, 0, (void*)buffer,
+                (tSize)(strlen(fileContents) + 1))) {
+            fprintf(stderr, "Failed to preadFully (direct).");
+            shutdown_and_exit(cl, -1);
+        }
+        if (strncmp(fileContents, buffer, strlen(fileContents)) != 0) {
+            fprintf(stderr, "Failed to preadFully (direct). Expected %s but "
+                            "got %s\n", fileContents, buffer);
+            shutdown_and_exit(cl, -1);
+        }
+        fprintf(stderr, "PreadFully (direct) following %d bytes:\n%s\n",
+                (int)(strlen(fileContents) + 1), buffer);
+        memset(buffer, 0, strlen(fileContents) + 1);
+        if (hdfsTell(fs, preadFile) != 0) {
+            fprintf(stderr, "PreadFully changed position of file\n");
+            shutdown_and_exit(cl, -1);
+        }
+
+        // Disable the direct pread path so that we really go through the slow
+        // read path
+        hdfsFileDisableDirectPread(preadFile);
+
+        if (hdfsFileUsesDirectPread(preadFile)) {
+            fprintf(stderr, "Disabled direct preads, but it is still enabled");
+            shutdown_and_exit(cl, -1);
+        }
+
+        if (!hdfsFileUsesDirectRead(preadFile)) {
+            fprintf(stderr, "Disabled direct preads, but direct read was "
+                            "disabled as well");
+            shutdown_and_exit(cl, -1);
+        }
+
+        num_pread_bytes = hdfsPread(fs, preadFile, 0, (void*)buffer, sizeof(buffer));
+        if (strncmp(fileContents, buffer, strlen(fileContents)) != 0) {
+            fprintf(stderr, "Failed to pread. Expected %s but got %s (%d bytes)\n",
+                    fileContents, buffer, num_pread_bytes);
+            shutdown_and_exit(cl, -1);
+        }
+        fprintf(stderr, "Pread following %d bytes:\n%s\n", num_pread_bytes, buffer);
+        memset(buffer, 0, strlen(fileContents) + 1);
+        if (hdfsTell(fs, preadFile) != 0) {
+            fprintf(stderr, "Pread changed position of file\n");
+            shutdown_and_exit(cl, -1);
+        }
+
+        // Test pread midway through the file rather than at the beginning
+        num_pread_bytes = hdfsPread(fs, preadFile, 7, (void*)buffer, sizeof(buffer));
+        if (strncmp(fileContentsChunk, buffer, strlen(fileContentsChunk)) != 0) {
+            fprintf(stderr, "Failed to pread. Expected %s but got %s (%d bytes)\n",
+                    fileContentsChunk, buffer, num_pread_bytes);
+            shutdown_and_exit(cl, -1);
+        }
+        fprintf(stderr, "Pread following %d bytes:\n%s\n", num_pread_bytes, buffer);
+        memset(buffer, 0, strlen(fileContents) + 1);
+        if (hdfsTell(fs, preadFile) != 0) {
+            fprintf(stderr, "Pread changed position of file\n");
+            shutdown_and_exit(cl, -1);
+        }
+
+        // hdfsPreadFully test
+        if (hdfsPreadFully(fs, preadFile, 0, (void*)buffer,
+                            (tSize)(strlen(fileContents) + 1))) {
+            fprintf(stderr, "Failed to preadFully.");
+            shutdown_and_exit(cl, -1);
+        }
+        if (strncmp(fileContents, buffer, strlen(fileContents)) != 0) {
+            fprintf(stderr, "Failed to preadFully. Expected %s but got %s\n",
+                    fileContents, buffer);
+            shutdown_and_exit(cl, -1);
+        }
+        fprintf(stderr, "PreadFully following %d bytes:\n%s\n",
+                (int)(strlen(fileContents) + 1), buffer);
+        memset(buffer, 0, strlen(fileContents) + 1);
+        if (hdfsTell(fs, preadFile) != 0) {
+            fprintf(stderr, "PreadFully changed position of file\n");
+            shutdown_and_exit(cl, -1);
+        }
+
+        hdfsCloseFile(fs, preadFile);
+
+        // Test correct behaviour for unsupported filesystems
+        localFile = hdfsOpenFile(lfs, writePath, O_RDONLY, 0, 0, 0);
+
+        if (hdfsFileUsesDirectPread(localFile)) {
+            fprintf(stderr, "Direct pread support incorrectly detected for local "
+                            "filesystem\n");
+            shutdown_and_exit(cl, -1);
         }
 
         hdfsCloseFile(lfs, localFile);
     }
 
+
+    {
+        // HDFS Open File Builder tests
+
+        exists = hdfsExists(fs, readPath);
+
+        if (exists) {
+            fprintf(stderr, "Failed to validate existence of %s\n", readPath);
+            shutdown_and_exit(cl, -1);
+        }
+
+        hdfsOpenFileBuilder *builder;
+        builder = hdfsOpenFileBuilderAlloc(fs, readPath);
+        hdfsOpenFileBuilderOpt(builder, "hello", "world");
+
+        hdfsOpenFileFuture *future;
+        future = hdfsOpenFileBuilderBuild(builder);
+
+        readFile = hdfsOpenFileFutureGet(future);
+        if (!hdfsOpenFileFutureCancel(future, 0)) {
+            fprintf(stderr, "Cancel on a completed Future should return false");
+            shutdown_and_exit(cl, -1);
+        }
+        hdfsOpenFileFutureFree(future);
+
+        memset(buffer, 0, sizeof(buffer));
+        num_read_bytes = hdfsRead(fs, readFile, (void *) buffer,
+                                  sizeof(buffer));
+        if (strncmp(fileContents, buffer, strlen(fileContents)) != 0) {
+            fprintf(stderr,
+                    "Failed to read. Expected %s but got %s (%d bytes)\n",
+                    fileContents, buffer, num_read_bytes);
+            shutdown_and_exit(cl, -1);
+        }
+        hdfsCloseFile(fs, readFile);
+
+        builder = hdfsOpenFileBuilderAlloc(fs, readPath);
+        hdfsOpenFileBuilderOpt(builder, "hello", "world");
+
+        future = hdfsOpenFileBuilderBuild(builder);
+
+        readFile = hdfsOpenFileFutureGetWithTimeout(future, 1, jDays);
+        if (!hdfsOpenFileFutureCancel(future, 0)) {
+            fprintf(stderr, "Cancel on a completed Future should return "
+                            "false");
+            shutdown_and_exit(cl, -1);
+        }
+        hdfsOpenFileFutureFree(future);
+
+        memset(buffer, 0, sizeof(buffer));
+        num_read_bytes = hdfsRead(fs, readFile, (void*)buffer,
+                                  sizeof(buffer));
+        if (strncmp(fileContents, buffer, strlen(fileContents)) != 0) {
+            fprintf(stderr, "Failed to read. Expected %s but got "
+                            "%s (%d bytes)\n", fileContents, buffer,
+                            num_read_bytes);
+            shutdown_and_exit(cl, -1);
+        }
+        memset(buffer, 0, strlen(fileContents) + 1);
+        hdfsCloseFile(fs, readFile);
+    }
+
     totalResult = 0;
     result = 0;
     {
@@ -425,7 +690,7 @@ int main(int argc, char **argv) {
       appendFile = hdfsOpenFile(fs, appendPath, O_WRONLY, 0, 0, 0);
       if(!appendFile) {
         fprintf(stderr, "Failed to open %s for writing!\n", appendPath);
-        exit(-1);
+        shutdown_and_exit(cl, -1);
       }
       fprintf(stderr, "Opened %s for writing successfully...\n", appendPath);
 
@@ -435,10 +700,10 @@ int main(int argc, char **argv) {
       fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
 
       if (hdfsFlush(fs, appendFile)) {
-        fprintf(stderr, "Failed to 'flush' %s\n", appendPath); 
-        exit(-1);
+        fprintf(stderr, "Failed to 'flush' %s\n", appendPath);
+        shutdown_and_exit(cl, -1);
         }
-      fprintf(stderr, "Flushed %s successfully!\n", appendPath); 
+      fprintf(stderr, "Flushed %s successfully!\n", appendPath);
 
       hdfsCloseFile(fs, appendFile);
 
@@ -446,7 +711,7 @@ int main(int argc, char **argv) {
       appendFile = hdfsOpenFile(fs, appendPath, O_WRONLY|O_APPEND, 0, 0, 0);
       if(!appendFile) {
         fprintf(stderr, "Failed to open %s for writing!\n", appendPath);
-        exit(-1);
+        shutdown_and_exit(cl, -1);
       }
       fprintf(stderr, "Opened %s for writing successfully...\n", appendPath);
 
@@ -456,10 +721,10 @@ int main(int argc, char **argv) {
       fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
 
       if (hdfsFlush(fs, appendFile)) {
-        fprintf(stderr, "Failed to 'flush' %s\n", appendPath); 
-        exit(-1);
+        fprintf(stderr, "Failed to 'flush' %s\n", appendPath);
+        shutdown_and_exit(cl, -1);
       }
-      fprintf(stderr, "Flushed %s successfully!\n", appendPath); 
+      fprintf(stderr, "Flushed %s successfully!\n", appendPath);
 
       hdfsCloseFile(fs, appendFile);
 
@@ -472,11 +737,11 @@ int main(int argc, char **argv) {
       readFile = hdfsOpenFile(fs, appendPath, O_RDONLY, 0, 0, 0);
       if (!readFile) {
         fprintf(stderr, "Failed to open %s for reading!\n", appendPath);
-        exit(-1);
+        shutdown_and_exit(cl, -1);
       }
 
       num_read_bytes = hdfsRead(fs, readFile, (void*)rdbuffer, sizeof(rdbuffer));
-      fprintf(stderr, "Read following %d bytes:\n%s\n", 
+      fprintf(stderr, "Read following %d bytes:\n%s\n",
               num_read_bytes, rdbuffer);
 
       fprintf(stderr, "read == Hello, World %s\n", ((result = (strcmp(rdbuffer, "Hello, World"))) == 0 ? "Success!" : "Failed!"));
@@ -496,16 +761,16 @@ int main(int argc, char **argv) {
       // the actual fs user capabilities. Thus just create a file and read
       // the owner is correct.
 
-      fs = hdfsConnectAsUserNewInstance("default", 0, tuser);
+      fs = hdfsConnectAsUserNewInstance("localhost", port, tuser);
       if(!fs) {
         fprintf(stderr, "Oops! Failed to connect to hdfs as user %s!\n",tuser);
-        exit(-1);
+        shutdown_and_exit(cl, -1);
       } 
 
         userFile = hdfsOpenFile(fs, userPath, O_WRONLY|O_CREAT, 0, 0, 0);
         if(!userFile) {
             fprintf(stderr, "Failed to open %s for writing!\n", userPath);
-            exit(-1);
+            shutdown_and_exit(cl, -1);
         }
         fprintf(stderr, "Opened %s for writing successfully...\n", userPath);
 
@@ -515,7 +780,7 @@ int main(int argc, char **argv) {
 
         if (hdfsFlush(fs, userFile)) {
             fprintf(stderr, "Failed to 'flush' %s\n", userPath); 
-            exit(-1);
+            shutdown_and_exit(cl, -1);
         }
         fprintf(stderr, "Flushed %s successfully!\n", userPath); 
 
@@ -528,6 +793,9 @@ int main(int argc, char **argv) {
     
     totalResult += (hdfsDisconnect(fs) != 0);
 
+    EXPECT_ZERO(nmdShutdown(cl));
+    nmdFree(cl);
+
     if (totalResult != 0) {
         return -1;
     } else {
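Taken together, these changes remove the test's dependency on an externally running HDFS at "default" and make it own the full cluster lifecycle. A condensed sketch of that flow, using only APIs that appear in this patch (run_against_mini_cluster is a hypothetical name):

#include "expect.h"
#include "hdfs/hdfs.h"
#include "native_mini_dfs.h"

static int run_against_mini_cluster(void)
{
    struct NativeMiniDfsConf conf = {
        1, /* doFormat */
    };
    struct NativeMiniDfsCluster *cl = nmdCreate(&conf);
    EXPECT_NONNULL(cl);
    EXPECT_ZERO(nmdWaitClusterUp(cl));

    struct hdfsBuilder *bld = hdfsNewBuilder();
    EXPECT_NONNULL(bld);
    hdfsBuilderSetForceNewInstance(bld);
    hdfsBuilderSetNameNode(bld, "localhost");
    hdfsBuilderSetNameNodePort(bld, (tPort) nmdGetNameNodePort(cl));
    /* See the comment in main() on why the mini cluster needs this. */
    hdfsBuilderConfSetStr(bld,
        "dfs.client.block.write.replace-datanode-on-failure.enable", "false");

    hdfsFS fs = hdfsBuilderConnect(bld);
    if (!fs) {
        nmdShutdown(cl);
        nmdFree(cl);
        return -1;
    }

    /* ... exercise reads, preads, appends, etc. here ... */

    int ret = (hdfsDisconnect(fs) != 0);
    EXPECT_ZERO(nmdShutdown(cl));
    nmdFree(cl);
    return ret;
}

Inside the real test, every failure path routes through shutdown_and_exit instead, since exiting mid-test must still tear the cluster down.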
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/CMakeLists.txt
index 08765f5e28046..a7fb311125110 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/CMakeLists.txt
@@ -35,7 +35,7 @@ hadoop_add_dual_library(hdfs
     exception.c
     jni_helper.c
     hdfs.c
-    common/htable.c
+    jclasses.c
     ${OS_DIR}/mutexes.c
     ${OS_DIR}/thread_local_storage.c
 )
@@ -55,11 +55,9 @@ set_target_properties(hdfs PROPERTIES
     SOVERSION ${LIBHDFS_VERSION})
 
 build_libhdfs_test(test_libhdfs_ops hdfs_static test_libhdfs_ops.c)
-link_libhdfs_test(test_libhdfs_ops hdfs_static ${JAVA_JVM_LIBRARY})
-build_libhdfs_test(test_libhdfs_reads hdfs_static test_libhdfs_read.c)
-link_libhdfs_test(test_libhdfs_reads hdfs_static ${JAVA_JVM_LIBRARY})
-build_libhdfs_test(test_libhdfs_write hdfs_static test_libhdfs_write.c)
-link_libhdfs_test(test_libhdfs_write hdfs_static ${JAVA_JVM_LIBRARY})
+link_libhdfs_test(test_libhdfs_ops hdfs_static native_mini_dfs ${JAVA_JVM_LIBRARY})
+add_libhdfs_test(test_libhdfs_ops hdfs_static)
+
 build_libhdfs_test(test_libhdfs_threaded hdfs_static expect.c test_libhdfs_threaded.c ${OS_DIR}/thread.c)
 link_libhdfs_test(test_libhdfs_threaded hdfs_static native_mini_dfs)
 add_libhdfs_test(test_libhdfs_threaded hdfs_static)
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/common/htable.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/common/htable.c
deleted file mode 100644
index 50c89ea9cf707..0000000000000
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/common/htable.c
+++ /dev/null
@@ -1,287 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "common/htable.h"
-
-#include <errno.h>
-#include <inttypes.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-struct htable_pair {
-    void *key;
-    void *val;
-};
-
-/**
- * A hash table which uses linear probing.
- */
-struct htable {
-    uint32_t capacity;
-    uint32_t used;
-    htable_hash_fn_t hash_fun;
-    htable_eq_fn_t eq_fun;
-    struct htable_pair *elem;
-};
-
-/**
- * An internal function for inserting a value into the hash table.
- *
- * Note: this function assumes that you have made enough space in the table.
- *
- * @param nelem         The new element to insert.
- * @param capacity      The capacity of the hash table.
- * @param hash_fun      The hash function to use.
- * @param key           The key to insert.
- * @param val           The value to insert.
- */
-static void htable_insert_internal(struct htable_pair *nelem, 
-        uint32_t capacity, htable_hash_fn_t hash_fun, void *key,
-        void *val)
-{
-    uint32_t i;
-
-    i = hash_fun(key, capacity);
-    while (1) {
-        if (!nelem[i].key) {
-            nelem[i].key = key;
-            nelem[i].val = val;
-            return;
-        }
-        i++;
-        if (i == capacity) {
-            i = 0;
-        }
-    }
-}
-
-static int htable_realloc(struct htable *htable, uint32_t new_capacity)
-{
-    struct htable_pair *nelem;
-    uint32_t i, old_capacity = htable->capacity;
-    htable_hash_fn_t hash_fun = htable->hash_fun;
-
-    nelem = calloc(new_capacity, sizeof(struct htable_pair));
-    if (!nelem) {
-        return ENOMEM;
-    }
-    for (i = 0; i < old_capacity; i++) {
-        struct htable_pair *pair = htable->elem + i;
-        if (pair->key) {
-            htable_insert_internal(nelem, new_capacity, hash_fun,
-                                   pair->key, pair->val);
-        }
-    }
-    free(htable->elem);
-    htable->elem = nelem;
-    htable->capacity = new_capacity;
-    return 0;
-}
-
-static uint32_t round_up_to_power_of_2(uint32_t i)
-{
-    if (i == 0) {
-        return 1;
-    }
-    i--;
-    i |= i >> 1;
-    i |= i >> 2;
-    i |= i >> 4;
-    i |= i >> 8;
-    i |= i >> 16;
-    i++;
-    return i;
-}
-
-struct htable *htable_alloc(uint32_t size,
-                htable_hash_fn_t hash_fun, htable_eq_fn_t eq_fun)
-{
-    struct htable *htable;
-
-    htable = calloc(1, sizeof(*htable));
-    if (!htable) {
-        return NULL;
-    }
-    size = round_up_to_power_of_2(size);
-    if (size < HTABLE_MIN_SIZE) {
-        size = HTABLE_MIN_SIZE;
-    }
-    htable->hash_fun = hash_fun;
-    htable->eq_fun = eq_fun;
-    htable->used = 0;
-    if (htable_realloc(htable, size)) {
-        free(htable);
-        return NULL;
-    }
-    return htable;
-}
-
-void htable_visit(struct htable *htable, visitor_fn_t fun, void *ctx)
-{
-    uint32_t i;
-
-    for (i = 0; i != htable->capacity; ++i) {
-        struct htable_pair *elem = htable->elem + i;
-        if (elem->key) {
-            fun(ctx, elem->key, elem->val);
-        }
-    }
-}
-
-void htable_free(struct htable *htable)
-{
-    if (htable) {
-        free(htable->elem);
-        free(htable);
-    }
-}
-
-int htable_put(struct htable *htable, void *key, void *val)
-{
-    int ret;
-    uint32_t nused;
-
-    // NULL is not a valid key value.
-    // This helps us implement htable_get_internal efficiently, since we know
-    // that we can stop when we encounter the first NULL key.
-    if (!key) {
-        return EINVAL;
-    }
-    // NULL is not a valid value.  Otherwise the results of htable_get would
-    // be confusing (does a NULL return mean entry not found, or that the
-    // entry was found and was NULL?) 
-    if (!val) {
-        return EINVAL;
-    }
-    // Re-hash if we have used more than half of the hash table
-    nused = htable->used + 1;
-    if (nused >= (htable->capacity / 2)) {
-        ret = htable_realloc(htable, htable->capacity * 2);
-        if (ret)
-            return ret;
-    }
-    htable_insert_internal(htable->elem, htable->capacity,
-                                htable->hash_fun, key, val);
-    htable->used++;
-    return 0;
-}
-
-static int htable_get_internal(const struct htable *htable,
-                               const void *key, uint32_t *out)
-{
-    uint32_t start_idx, idx;
-
-    start_idx = htable->hash_fun(key, htable->capacity);
-    idx = start_idx;
-    while (1) {
-        struct htable_pair *pair = htable->elem + idx;
-        if (!pair->key) {
-            // We always maintain the invariant that the entries corresponding
-            // to a given key are stored in a contiguous block, not separated
-            // by any NULLs.  So if we encounter a NULL, our search is over.
-            return ENOENT;
-        } else if (htable->eq_fun(pair->key, key)) {
-            *out = idx;
-            return 0;
-        }
-        idx++;
-        if (idx == htable->capacity) {
-            idx = 0;
-        }
-        if (idx == start_idx) {
-            return ENOENT;
-        }
-    }
-}
-
-void *htable_get(const struct htable *htable, const void *key)
-{
-    uint32_t idx;
-
-    if (htable_get_internal(htable, key, &idx)) {
-        return NULL;
-    }
-    return htable->elem[idx].val;
-}
-
-void htable_pop(struct htable *htable, const void *key,
-                void **found_key, void **found_val)
-{
-    uint32_t hole, i;
-    const void *nkey;
-
-    if (htable_get_internal(htable, key, &hole)) {
-        *found_key = NULL;
-        *found_val = NULL;
-        return;
-    }
-    i = hole;
-    htable->used--;
-    // We need to maintain the compactness invariant used in
-    // htable_get_internal.  This invariant specifies that the entries for any
-    // given key are never separated by NULLs (although they may be separated
-    // by entries for other keys.)
-    while (1) {
-        i++;
-        if (i == htable->capacity) {
-            i = 0;
-        }
-        nkey = htable->elem[i].key;
-        if (!nkey) {
-            *found_key = htable->elem[hole].key;
-            *found_val = htable->elem[hole].val;
-            htable->elem[hole].key = NULL;
-            htable->elem[hole].val = NULL;
-            return;
-        } else if (htable->eq_fun(key, nkey)) {
-            htable->elem[hole].key = htable->elem[i].key;
-            htable->elem[hole].val = htable->elem[i].val;
-            hole = i;
-        }
-    }
-}
-
-uint32_t htable_used(const struct htable *htable)
-{
-    return htable->used;
-}
-
-uint32_t htable_capacity(const struct htable *htable)
-{
-    return htable->capacity;
-}
-
-uint32_t ht_hash_string(const void *str, uint32_t max)
-{
-    const char *s = str;
-    uint32_t hash = 0;
-
-    while (*s) {
-        hash = (hash * 31) + *s;
-        s++;
-    }
-    return hash % max;
-}
-
-int ht_compare_string(const void *a, const void *b)
-{
-    return strcmp(a, b) == 0;
-}
-
-// vim: ts=4:sw=4:tw=79:et
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/common/htable.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/common/htable.h
deleted file mode 100644
index 33f1229051582..0000000000000
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/common/htable.h
+++ /dev/null
@@ -1,161 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef HADOOP_CORE_COMMON_HASH_TABLE
-#define HADOOP_CORE_COMMON_HASH_TABLE
-
-#include <inttypes.h>
-#include <stdio.h>
-#include <stdlib.h>
-
-#define HTABLE_MIN_SIZE 4
-
-struct htable;
-
-/**
- * An HTable hash function.
- *
- * @param key       The key.
- * @param capacity  The total capacity.
- *
- * @return          The hash slot.  Must be less than the capacity.
- */
-typedef uint32_t (*htable_hash_fn_t)(const void *key, uint32_t capacity);
-
-/**
- * An HTable equality function.  Compares two keys.
- *
- * @param a         First key.
- * @param b         Second key.
- *
- * @return          nonzero if the keys are equal.
- */
-typedef int (*htable_eq_fn_t)(const void *a, const void *b);
-
-/**
- * Allocate a new hash table.
- *
- * @param capacity  The minimum suggested starting capacity.
- * @param hash_fun  The hash function to use in this hash table.
- * @param eq_fun    The equals function to use in this hash table.
- *
- * @return          The new hash table on success; NULL on OOM.
- */
-struct htable *htable_alloc(uint32_t capacity, htable_hash_fn_t hash_fun,
-                            htable_eq_fn_t eq_fun);
-
-typedef void (*visitor_fn_t)(void *ctx, void *key, void *val);
-
-/**
- * Visit all of the entries in the hash table.
- *
- * @param htable    The hash table.
- * @param fun       The callback function to invoke on each key and value.
- * @param ctx       Context pointer to pass to the callback.
- */
-void htable_visit(struct htable *htable, visitor_fn_t fun, void *ctx);
-
-/**
- * Free the hash table.
- *
- * It is up the calling code to ensure that the keys and values inside the
- * table are de-allocated, if that is necessary.
- *
- * @param htable    The hash table.
- */
-void htable_free(struct htable *htable);
-
-/**
- * Add an entry to the hash table.
- *
- * @param htable    The hash table.
- * @param key       The key to add.  This cannot be NULL.
- * @param fun       The value to add.  This cannot be NULL.
- *
- * @return          0 on success;
- *                  EEXIST if the value already exists in the table;
- *                  ENOMEM if there is not enough memory to add the element.
- *                  EFBIG if the hash table has too many entries to fit in 32
- *                      bits.
- */
-int htable_put(struct htable *htable, void *key, void *val);
-
-/**
- * Get an entry from the hash table.
- *
- * @param htable    The hash table.
- * @param key       The key to find.
- *
- * @return          NULL if there is no such entry; the entry otherwise.
- */
-void *htable_get(const struct htable *htable, const void *key);
-
-/**
- * Get an entry from the hash table and remove it.
- *
- * @param htable    The hash table.
- * @param key       The key for the entry find and remove.
- * @param found_key (out param) NULL if the entry was not found; the found key
- *                      otherwise.
- * @param found_val (out param) NULL if the entry was not found; the found
- *                      value otherwise.
- */
-void htable_pop(struct htable *htable, const void *key,
-                void **found_key, void **found_val);
-
-/**
- * Get the number of entries used in the hash table.
- *
- * @param htable    The hash table.
- *
- * @return          The number of entries used in the hash table.
- */
-uint32_t htable_used(const struct htable *htable);
-
-/**
- * Get the capacity of the hash table.
- *
- * @param htable    The hash table.
- *
- * @return          The capacity of the hash table.
- */
-uint32_t htable_capacity(const struct htable *htable);
-
-/**
- * Hash a string.
- *
- * @param str       The string.
- * @param max       Maximum hash value
- *
- * @return          A number less than max.
- */
-uint32_t ht_hash_string(const void *str, uint32_t max);
-
-/**
- * Compare two strings.
- *
- * @param a         The first string.
- * @param b         The second string.
- *
- * @return          1 if the strings are identical; 0 otherwise.
- */
-int ht_compare_string(const void *a, const void *b);
-
-#endif
-
-// vim: ts=4:sw=4:tw=79:et
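With both htable files gone, class lookups are no longer keyed by strings at runtime anywhere in libhdfs. One concrete effect, sketched under the assumption that jclasses exposes an accessor along the lines of getCachedClass (the real header does not appear in this diff): a typo in a JC_* constant fails to compile, whereas a typo in a class-name string fed to the old string-keyed cache only surfaced as a failed lookup at runtime.

#include <jni.h>

/* Assumed shape; jclasses.h itself is not part of this diff. */
typedef enum { JC_CONFIGURATION, NUM_CACHED_CLASSES } CachedJavaClass;
extern jclass getCachedClass(JNIEnv *env, CachedJavaClass jc);

static jclass lookupConfiguration(JNIEnv *env)
{
    /* Writing JC_CONFIGUATION here would be a compile error. The old
     * htable_get(cache, "org/apache/hadoop/conf/Configuation") would
     * compile cleanly and return NULL only at runtime. */
    return getCachedClass(env, JC_CONFIGURATION);
}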
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/exception.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/exception.c
index bcbb851534d88..fec9a103b4e23 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/exception.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/exception.c
@@ -18,6 +18,7 @@
 
 #include "exception.h"
 #include "hdfs/hdfs.h"
+#include "jclasses.h"
 #include "jni_helper.h"
 #include "platform.h"
 
@@ -129,9 +130,8 @@ static char* getExceptionUtilString(JNIEnv *env, jthrowable exc, char *methodNam
     jvalue jVal;
     jstring jStr = NULL;
     char *excString = NULL;
-    jthr = invokeMethod(env, &jVal, STATIC, NULL,
-        "org/apache/commons/lang3/exception/ExceptionUtils",
-        methodName, "(Ljava/lang/Throwable;)Ljava/lang/String;", exc);
+    jthr = invokeMethod(env, &jVal, STATIC, NULL, JC_EXCEPTION_UTILS,
+            methodName, "(Ljava/lang/Throwable;)Ljava/lang/String;", exc);
     if (jthr) {
         destroyLocalReference(env, jthr);
         return NULL;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c
index 2d1b7e2fcc2cb..0c1a021b5f8e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c
@@ -18,6 +18,7 @@
 
 #include "exception.h"
 #include "hdfs/hdfs.h"
+#include "jclasses.h"
 #include "jni_helper.h"
 #include "platform.h"
 
@@ -26,23 +27,6 @@
 #include <stdio.h>
 #include <string.h>
 
-/* Some frequently used Java paths */
-#define HADOOP_CONF     "org/apache/hadoop/conf/Configuration"
-#define HADOOP_PATH     "org/apache/hadoop/fs/Path"
-#define HADOOP_LOCALFS  "org/apache/hadoop/fs/LocalFileSystem"
-#define HADOOP_FS       "org/apache/hadoop/fs/FileSystem"
-#define HADOOP_FSSTATUS "org/apache/hadoop/fs/FsStatus"
-#define HADOOP_BLK_LOC  "org/apache/hadoop/fs/BlockLocation"
-#define HADOOP_DFS      "org/apache/hadoop/hdfs/DistributedFileSystem"
-#define HADOOP_ISTRM    "org/apache/hadoop/fs/FSDataInputStream"
-#define HADOOP_OSTRM    "org/apache/hadoop/fs/FSDataOutputStream"
-#define HADOOP_STAT     "org/apache/hadoop/fs/FileStatus"
-#define HADOOP_FSPERM   "org/apache/hadoop/fs/permission/FsPermission"
-#define JAVA_NET_ISA    "java/net/InetSocketAddress"
-#define JAVA_NET_URI    "java/net/URI"
-#define JAVA_STRING     "java/lang/String"
-#define READ_OPTION     "org/apache/hadoop/fs/ReadOption"
-
 #define JAVA_VOID       "V"
 
 /* Macros for constructing method signatures */
@@ -54,10 +38,32 @@
 
 #define KERBEROS_TICKET_CACHE_PATH "hadoop.security.kerberos.ticket.cache.path"
 
+// StreamCapability flags taken from o.a.h.fs.StreamCapabilities
+#define IS_READ_BYTE_BUFFER_CAPABILITY "in:readbytebuffer"
+#define IS_PREAD_BYTE_BUFFER_CAPABILITY "in:preadbytebuffer"
+
 // Bit fields for hdfsFile_internal flags
 #define HDFS_FILE_SUPPORTS_DIRECT_READ (1<<0)
+#define HDFS_FILE_SUPPORTS_DIRECT_PREAD (1<<1)
 
+/**
+ * Reads bytes using the read(ByteBuffer) API. By using Java
+ * DirectByteBuffers we can avoid copying the bytes onto the Java heap.
+ * Instead, the data is copied directly from kernel space to the C heap.
+ */
 tSize readDirect(hdfsFS fs, hdfsFile f, void* buffer, tSize length);
+
+/**
+ * Reads bytes using the read(long, ByteBuffer) API. By using Java
+ * DirectByteBuffers we can avoid copying the bytes onto the Java heap.
+ * Instead, the data is copied directly from kernel space to the C heap.
+ */
+tSize preadDirect(hdfsFS fs, hdfsFile file, tOffset position, void* buffer,
+                  tSize length);
+
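+/**
+ * Positioned read fully using the readFully(long, ByteBuffer) API. As with
+ * preadDirect, the data lands in the caller's C buffer by way of a
+ * DirectByteBuffer rather than a copy through the Java heap.
+ */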
+int preadFullyDirect(hdfsFS fs, hdfsFile file, tOffset position, void* buffer,
+                  tSize length);
+
 static void hdfsFreeFileInfoEntry(hdfsFileInfo *hdfsFileInfo);
 
 /**
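A note on what the direct pread path amounts to before the hunks below wire it up. The sketch is illustrative rather than the patch's implementation: preadDirectSketch and JC_FS_DATA_INPUT_STREAM are invented names, the libhdfs helpers (invokeMethod, tSize, tOffset) are taken as given, and NewDirectByteBuffer is standard JNI. The real code would take this path only for streams that report the in:preadbytebuffer capability defined above.

#include <jni.h>

static tSize preadDirectSketch(JNIEnv *env, jobject jInputStream,
                               tOffset position, void *buffer, tSize length)
{
    jvalue jVal;
    jthrowable jthr;
    /* Wrap the caller's C buffer; no copy is made here. */
    jobject byteBuffer = (*env)->NewDirectByteBuffer(env, buffer, length);
    if (!byteBuffer) {
        return -1; /* allocation failed or direct buffers unsupported */
    }
    /* "(JLjava/nio/ByteBuffer;)I" is the signature of the
     * read(long, ByteBuffer) API named in the comments above. */
    jthr = invokeMethod(env, &jVal, INSTANCE, jInputStream,
            JC_FS_DATA_INPUT_STREAM, "read", "(JLjava/nio/ByteBuffer;)I",
            (jlong) position, byteBuffer);
    (*env)->DeleteLocalRef(env, byteBuffer);
    if (jthr) {
        return -1; /* the real code prints and frees the exception */
    }
    return (tSize) jVal.i; /* bytes read, or -1 at end of stream */
}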
@@ -109,9 +115,8 @@ int hdfsGetHedgedReadMetrics(hdfsFS fs, struct hdfsHedgedReadMetrics **metrics)
     }
 
     jthr = invokeMethod(env, &jVal, INSTANCE, jFS,
-                  HADOOP_DFS,
-                  "getHedgedReadMetrics",
-                  "()Lorg/apache/hadoop/hdfs/DFSHedgedReadMetrics;");
+            JC_DISTRIBUTED_FILE_SYSTEM, "getHedgedReadMetrics",
+            "()Lorg/apache/hadoop/hdfs/DFSHedgedReadMetrics;");
     if (jthr) {
         ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
             "hdfsGetHedgedReadMetrics: getHedgedReadMetrics failed");
@@ -126,8 +131,7 @@ int hdfsGetHedgedReadMetrics(hdfsFS fs, struct hdfsHedgedReadMetrics **metrics)
     }
 
     jthr = invokeMethod(env, &jVal, INSTANCE, hedgedReadMetrics,
-                  "org/apache/hadoop/hdfs/DFSHedgedReadMetrics",
-                  "getHedgedReadOps", "()J");
+            JC_DFS_HEDGED_READ_METRICS, "getHedgedReadOps", "()J");
     if (jthr) {
         ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
             "hdfsGetHedgedReadStatistics: getHedgedReadOps failed");
@@ -136,8 +140,7 @@ int hdfsGetHedgedReadMetrics(hdfsFS fs, struct hdfsHedgedReadMetrics **metrics)
     m->hedgedReadOps = jVal.j;
 
     jthr = invokeMethod(env, &jVal, INSTANCE, hedgedReadMetrics,
-                  "org/apache/hadoop/hdfs/DFSHedgedReadMetrics",
-                  "getHedgedReadWins", "()J");
+            JC_DFS_HEDGED_READ_METRICS, "getHedgedReadWins", "()J");
     if (jthr) {
         ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
             "hdfsGetHedgedReadStatistics: getHedgedReadWins failed");
@@ -146,8 +149,7 @@ int hdfsGetHedgedReadMetrics(hdfsFS fs, struct hdfsHedgedReadMetrics **metrics)
     m->hedgedReadOpsWin = jVal.j;
 
     jthr = invokeMethod(env, &jVal, INSTANCE, hedgedReadMetrics,
-                  "org/apache/hadoop/hdfs/DFSHedgedReadMetrics",
-                  "getHedgedReadOpsInCurThread", "()J");
+            JC_DFS_HEDGED_READ_METRICS, "getHedgedReadOpsInCurThread", "()J");
     if (jthr) {
         ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
             "hdfsGetHedgedReadStatistics: getHedgedReadOpsInCurThread failed");
@@ -192,10 +194,9 @@ int hdfsFileGetReadStatistics(hdfsFile file,
         ret = EINVAL;
         goto done;
     }
-    jthr = invokeMethod(env, &jVal, INSTANCE, file->file, 
-                  "org/apache/hadoop/hdfs/client/HdfsDataInputStream",
-                  "getReadStatistics",
-                  "()Lorg/apache/hadoop/hdfs/ReadStatistics;");
+    jthr = invokeMethod(env, &jVal, INSTANCE, file->file,
+            JC_HDFS_DATA_INPUT_STREAM, "getReadStatistics",
+            "()Lorg/apache/hadoop/hdfs/ReadStatistics;");
     if (jthr) {
         ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
             "hdfsFileGetReadStatistics: getReadStatistics failed");
@@ -208,8 +209,7 @@ int hdfsFileGetReadStatistics(hdfsFile file,
         goto done;
     }
     jthr = invokeMethod(env, &jVal, INSTANCE, readStats,
-                  "org/apache/hadoop/hdfs/ReadStatistics",
-                  "getTotalBytesRead", "()J");
+            JC_READ_STATISTICS, "getTotalBytesRead", "()J");
     if (jthr) {
         ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
             "hdfsFileGetReadStatistics: getTotalBytesRead failed");
@@ -218,8 +218,7 @@ int hdfsFileGetReadStatistics(hdfsFile file,
     s->totalBytesRead = jVal.j;
 
     jthr = invokeMethod(env, &jVal, INSTANCE, readStats,
-                  "org/apache/hadoop/hdfs/ReadStatistics",
-                  "getTotalLocalBytesRead", "()J");
+            JC_READ_STATISTICS, "getTotalLocalBytesRead", "()J");
     if (jthr) {
         ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
             "hdfsFileGetReadStatistics: getTotalLocalBytesRead failed");
@@ -228,8 +227,8 @@ int hdfsFileGetReadStatistics(hdfsFile file,
     s->totalLocalBytesRead = jVal.j;
 
     jthr = invokeMethod(env, &jVal, INSTANCE, readStats,
-                  "org/apache/hadoop/hdfs/ReadStatistics",
-                  "getTotalShortCircuitBytesRead", "()J");
+            JC_READ_STATISTICS, "getTotalShortCircuitBytesRead",
+            "()J");
     if (jthr) {
         ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
             "hdfsFileGetReadStatistics: getTotalShortCircuitBytesRead failed");
@@ -237,8 +236,8 @@ int hdfsFileGetReadStatistics(hdfsFile file,
     }
     s->totalShortCircuitBytesRead = jVal.j;
     jthr = invokeMethod(env, &jVal, INSTANCE, readStats,
-                  "org/apache/hadoop/hdfs/ReadStatistics",
-                  "getTotalZeroCopyBytesRead", "()J");
+            JC_READ_STATISTICS, "getTotalZeroCopyBytesRead",
+            "()J");
     if (jthr) {
         ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
             "hdfsFileGetReadStatistics: getTotalZeroCopyBytesRead failed");
@@ -280,8 +279,8 @@ int hdfsFileClearReadStatistics(hdfsFile file)
         goto done;
     }
     jthr = invokeMethod(env, NULL, INSTANCE, file->file,
-                  "org/apache/hadoop/hdfs/client/HdfsDataInputStream",
-                  "clearReadStatistics", "()V");
+            JC_HDFS_DATA_INPUT_STREAM, "clearReadStatistics",
+            "()V");
     if (jthr) {
         ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
             "hdfsFileClearReadStatistics: clearReadStatistics failed");
@@ -308,7 +307,7 @@ int hdfsFileIsOpenForWrite(hdfsFile file)
 
 int hdfsFileUsesDirectRead(hdfsFile file)
 {
-    return !!(file->flags & HDFS_FILE_SUPPORTS_DIRECT_READ);
+    return (file->flags & HDFS_FILE_SUPPORTS_DIRECT_READ) != 0;
 }
 
 void hdfsFileDisableDirectRead(hdfsFile file)
@@ -316,6 +315,17 @@ void hdfsFileDisableDirectRead(hdfsFile file)
     file->flags &= ~HDFS_FILE_SUPPORTS_DIRECT_READ;
 }
 
+int hdfsFileUsesDirectPread(hdfsFile file)
+{
+    return (file->flags & HDFS_FILE_SUPPORTS_DIRECT_PREAD) != 0;
+}
+
+void hdfsFileDisableDirectPread(hdfsFile file)
+{
+    file->flags &= ~HDFS_FILE_SUPPORTS_DIRECT_PREAD;
+}
+
 int hdfsDisableDomainSocketSecurity(void)
 {
     jthrowable jthr;
@@ -324,8 +334,7 @@ int hdfsDisableDomainSocketSecurity(void)
       errno = EINTERNAL;
       return -1;
     }
-    jthr = invokeMethod(env, NULL, STATIC, NULL,
-            "org/apache/hadoop/net/unix/DomainSocket",
+    jthr = invokeMethod(env, NULL, STATIC, NULL, JC_DOMAIN_SOCKET,
             "disableBindPathValidation", "()V");
     if (jthr) {
         errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -346,7 +355,7 @@ typedef struct
 
 /**
  * Helper function to create a org.apache.hadoop.fs.Path object.
- * @param env: The JNIEnv pointer. 
+ * @param env: The JNIEnv pointer.
  * @param path: The file-path for which to construct org.apache.hadoop.fs.Path
  * object.
  * @return Returns a jobject on success and NULL on error.
@@ -363,8 +372,8 @@ static jthrowable constructNewObjectOfPath(JNIEnv *env, const char *path,
     if (jthr)
         return jthr;
     //Construct the org.apache.hadoop.fs.Path object
-    jthr = constructNewObjectOfClass(env, &jPath, "org/apache/hadoop/fs/Path",
-                                     "(Ljava/lang/String;)V", jPathString);
+    jthr = constructNewObjectOfCachedClass(env, &jPath, JC_PATH,
+            "(Ljava/lang/String;)V", jPathString);
     destroyLocalReference(env, jPathString);
     if (jthr)
         return jthr;
@@ -383,8 +392,8 @@ static jthrowable hadoopConfGetStr(JNIEnv *env, jobject jConfiguration,
     if (jthr)
         goto done;
     jthr = invokeMethod(env, &jVal, INSTANCE, jConfiguration,
-            HADOOP_CONF, "get", JMETHOD1(JPARAM(JAVA_STRING),
-                                         JPARAM(JAVA_STRING)), jkey);
+            JC_CONFIGURATION, "get", JMETHOD1(JPARAM(JAVA_STRING),
+                    JPARAM(JAVA_STRING)), jkey);
     if (jthr)
         goto done;
     jRet = jVal.l;
@@ -407,7 +416,8 @@ int hdfsConfGetStr(const char *key, char **val)
         ret = EINTERNAL;
         goto done;
     }
-    jthr = constructNewObjectOfClass(env, &jConfiguration, HADOOP_CONF, "()V");
+    jthr = constructNewObjectOfCachedClass(env, &jConfiguration,
+            JC_CONFIGURATION, "()V");
     if (jthr) {
         ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
             "hdfsConfGetStr(%s): new Configuration", key);
@@ -443,8 +453,8 @@ static jthrowable hadoopConfGetInt(JNIEnv *env, jobject jConfiguration,
     if (jthr)
         return jthr;
     jthr = invokeMethod(env, &jVal, INSTANCE, jConfiguration,
-            HADOOP_CONF, "getInt", JMETHOD2(JPARAM(JAVA_STRING), "I", "I"),
-            jkey, (jint)(*val));
+            JC_CONFIGURATION, "getInt",
+            JMETHOD2(JPARAM(JAVA_STRING), "I", "I"), jkey, (jint)(*val));
     destroyLocalReference(env, jkey);
     if (jthr)
         return jthr;
@@ -464,7 +474,8 @@ int hdfsConfGetInt(const char *key, int32_t *val)
       ret = EINTERNAL;
       goto done;
     }
-    jthr = constructNewObjectOfClass(env, &jConfiguration, HADOOP_CONF, "()V");
+    jthr = constructNewObjectOfCachedClass(env, &jConfiguration,
+            JC_CONFIGURATION, "()V");
     if (jthr) {
         ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
             "hdfsConfGetInt(%s): new Configuration", key);
@@ -513,7 +524,7 @@ int hdfsBuilderConfSetStr(struct hdfsBuilder *bld, const char *key,
                           const char *val)
 {
     struct hdfsBuilderConfOpt *opt, *next;
-    
+
     opt = calloc(1, sizeof(struct hdfsBuilderConfOpt));
     if (!opt)
         return -ENOMEM;
@@ -697,7 +708,8 @@ hdfsFS hdfsBuilderConnect(struct hdfsBuilder *bld)
     }
 
     //  jConfiguration = new Configuration();
-    jthr = constructNewObjectOfClass(env, &jConfiguration, HADOOP_CONF, "()V");
+    jthr = constructNewObjectOfCachedClass(env, &jConfiguration,
+            JC_CONFIGURATION, "()V");
     if (jthr) {
         ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
             "hdfsBuilderConnect(%s)", hdfsBuilderToStr(bld, buf, sizeof(buf)));
@@ -713,15 +725,16 @@ hdfsFS hdfsBuilderConnect(struct hdfsBuilder *bld)
             goto done;
         }
     }
- 
+
     //Check what type of FileSystem the caller wants...
     if (bld->nn == NULL) {
         // Get a local filesystem.
         if (bld->forceNewInstance) {
             // fs = FileSytem#newInstanceLocal(conf);
-            jthr = invokeMethod(env, &jVal, STATIC, NULL, HADOOP_FS,
-                    "newInstanceLocal", JMETHOD1(JPARAM(HADOOP_CONF),
-                    JPARAM(HADOOP_LOCALFS)), jConfiguration);
+            jthr = invokeMethod(env, &jVal, STATIC, NULL,
+                    JC_FILE_SYSTEM, "newInstanceLocal",
+                    JMETHOD1(JPARAM(HADOOP_CONF), JPARAM(HADOOP_LOCALFS)),
+                    jConfiguration);
             if (jthr) {
                 ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
                     "hdfsBuilderConnect(%s)",
@@ -731,10 +744,10 @@ hdfsFS hdfsBuilderConnect(struct hdfsBuilder *bld)
             jFS = jVal.l;
         } else {
             // fs = FileSytem#getLocal(conf);
-            jthr = invokeMethod(env, &jVal, STATIC, NULL, HADOOP_FS, "getLocal",
-                             JMETHOD1(JPARAM(HADOOP_CONF),
-                                      JPARAM(HADOOP_LOCALFS)),
-                             jConfiguration);
+            jthr = invokeMethod(env, &jVal, STATIC, NULL,
+                    JC_FILE_SYSTEM, "getLocal",
+                    JMETHOD1(JPARAM(HADOOP_CONF), JPARAM(HADOOP_LOCALFS)),
+                    jConfiguration);
             if (jthr) {
                 ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
                     "hdfsBuilderConnect(%s)",
@@ -746,10 +759,10 @@ hdfsFS hdfsBuilderConnect(struct hdfsBuilder *bld)
     } else {
         if (!strcmp(bld->nn, "default")) {
             // jURI = FileSystem.getDefaultUri(conf)
-            jthr = invokeMethod(env, &jVal, STATIC, NULL, HADOOP_FS,
-                          "getDefaultUri",
-                          "(Lorg/apache/hadoop/conf/Configuration;)Ljava/net/URI;",
-                          jConfiguration);
+            jthr = invokeMethod(env, &jVal, STATIC, NULL,
+                    JC_FILE_SYSTEM, "getDefaultUri",
+                    "(Lorg/apache/hadoop/conf/Configuration;)Ljava/net/URI;",
+                    jConfiguration);
             if (jthr) {
                 ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
                     "hdfsBuilderConnect(%s)",
@@ -769,9 +782,9 @@ hdfsFS hdfsBuilderConnect(struct hdfsBuilder *bld)
                     hdfsBuilderToStr(bld, buf, sizeof(buf)));
                 goto done;
             }
-            jthr = invokeMethod(env, &jVal, STATIC, NULL, JAVA_NET_URI,
-                             "create", "(Ljava/lang/String;)Ljava/net/URI;",
-                             jURIString);
+            jthr = invokeMethod(env, &jVal, STATIC, NULL,
+                    JC_URI, "create",
+                    "(Ljava/lang/String;)Ljava/net/URI;", jURIString);
             if (jthr) {
                 ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
                     "hdfsBuilderConnect(%s)",
@@ -799,11 +812,11 @@ hdfsFS hdfsBuilderConnect(struct hdfsBuilder *bld)
             goto done;
         }
         if (bld->forceNewInstance) {
-            jthr = invokeMethod(env, &jVal, STATIC, NULL, HADOOP_FS,
-                    "newInstance", JMETHOD3(JPARAM(JAVA_NET_URI), 
-                        JPARAM(HADOOP_CONF), JPARAM(JAVA_STRING),
-                        JPARAM(HADOOP_FS)),
-                    jURI, jConfiguration, jUserString);
+            jthr = invokeMethod(env, &jVal, STATIC, NULL,
+                    JC_FILE_SYSTEM, "newInstance",
+                    JMETHOD3(JPARAM(JAVA_NET_URI), JPARAM(HADOOP_CONF),
+                             JPARAM(JAVA_STRING), JPARAM(HADOOP_FS)), jURI,
+                    jConfiguration, jUserString);
             if (jthr) {
                 ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
                     "hdfsBuilderConnect(%s)",
@@ -812,10 +825,11 @@ hdfsFS hdfsBuilderConnect(struct hdfsBuilder *bld)
             }
             jFS = jVal.l;
         } else {
-            jthr = invokeMethod(env, &jVal, STATIC, NULL, HADOOP_FS, "get",
+            jthr = invokeMethod(env, &jVal, STATIC, NULL,
+                    JC_FILE_SYSTEM, "get",
                     JMETHOD3(JPARAM(JAVA_NET_URI), JPARAM(HADOOP_CONF),
-                        JPARAM(JAVA_STRING), JPARAM(HADOOP_FS)),
-                        jURI, jConfiguration, jUserString);
+                            JPARAM(JAVA_STRING), JPARAM(HADOOP_FS)), jURI,
+                            jConfiguration, jUserString);
             if (jthr) {
                 ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
                     "hdfsBuilderConnect(%s)",
@@ -877,8 +891,8 @@ int hdfsDisconnect(hdfsFS fs)
         return -1;
     }
 
-    jthr = invokeMethod(env, NULL, INSTANCE, jFS, HADOOP_FS,
-                     "close", "()V");
+    jthr = invokeMethod(env, NULL, INSTANCE, jFS, JC_FILE_SYSTEM,
+            "close", "()V");
     if (jthr) {
         ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
             "hdfsDisconnect: FileSystem#close");
@@ -909,8 +923,9 @@ static jthrowable getDefaultBlockSize(JNIEnv *env, jobject jFS,
     jthrowable jthr;
     jvalue jVal;
 
-    jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
-                 "getDefaultBlockSize", JMETHOD1(JPARAM(HADOOP_PATH), "J"), jPath);
+    jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
+            "getDefaultBlockSize", JMETHOD1(JPARAM(HADOOP_PATH),
+                    "J"), jPath);
     if (jthr)
         return jthr;
     *out = jVal.j;
@@ -945,14 +960,14 @@ struct hdfsStreamBuilder {
 struct hdfsStreamBuilder *hdfsStreamBuilderAlloc(hdfsFS fs,
                                             const char *path, int flags)
 {
     size_t path_len = strlen(path);
     struct hdfsStreamBuilder *bld;
 
     // Check for overflow in path_len
     if (path_len > SIZE_MAX - sizeof(struct hdfsStreamBuilder)) {
         errno = EOVERFLOW;
         return NULL;
     }
     // sizeof(hdfsStreamBuilder->path) includes one byte for the string
     // terminator
     bld = malloc(sizeof(struct hdfsStreamBuilder) + path_len);
@@ -1008,6 +1018,83 @@ int hdfsStreamBuilderSetDefaultBlockSize(struct hdfsStreamBuilder *bld,
     return 0;
 }
 
+/**
+ * Delegates to FSDataInputStream#hasCapability(String). Used to check if a
+ * given input stream supports certain methods, such as
+ * ByteBufferReadable#read(ByteBuffer).
+ *
+ * @param jFile the FsDataInputStream to call hasCapability on
+ * @param capability the name of the capability to query; for a full list of
+ *        possible values see StreamCapabilities
+ *
+ * @return true if the given jFile has the given capability, false otherwise
+ *
+ * @see org.apache.hadoop.fs.StreamCapabilities
+ */
+static int hdfsHasStreamCapability(jobject jFile,
+        const char *capability) {
+    int ret = 0;
+    jthrowable jthr = NULL;
+    jvalue jVal;
+    jstring jCapabilityString = NULL;
+
+    /* Get the JNIEnv* corresponding to current thread */
+    JNIEnv* env = getJNIEnv();
+    if (env == NULL) {
+        errno = EINTERNAL;
+        return 0;
+    }
+
+    jthr = newJavaStr(env, capability, &jCapabilityString);
+    if (jthr) {
+        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                "hdfsHasStreamCapability(%s): newJavaStr", capability);
+        goto done;
+    }
+    jthr = invokeMethod(env, &jVal, INSTANCE, jFile,
+            JC_FS_DATA_INPUT_STREAM, "hasCapability", "(Ljava/lang/String;)Z",
+            jCapabilityString);
+    if (jthr) {
+        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                "hdfsHasStreamCapability(%s): FSDataInputStream#hasCapability",
+                capability);
+        goto done;
+    }
+
+done:
+    destroyLocalReference(env, jCapabilityString);
+    if (ret) {
+        errno = ret;
+        return 0;
+    }
+    if (jVal.z == JNI_TRUE) {
+        return 1;
+    }
+    return 0;
+}
+
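+/*
+ * Example (sketch): assuming the IS_*_CAPABILITY macros used below expand
+ * to the StreamCapabilities names "in:readbytebuffer" and
+ * "in:preadbytebuffer", a direct-read probe reduces to:
+ *
+ *   if (hdfsHasStreamCapability(jFile, "in:readbytebuffer")) {
+ *       file->flags |= HDFS_FILE_SUPPORTS_DIRECT_READ;
+ *   }
+ */
+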
+/**
+ * Sets the flags of the given hdfsFile based on the capabilities of the
+ * underlying stream.
+ *
+ * @param file file->flags will be updated based on the capabilities of jFile
+ * @param jFile the underlying stream to check for capabilities
+ */
+static void setFileFlagCapabilities(hdfsFile file, jobject jFile) {
+    // Check the StreamCapabilities of jFile to see if we can do direct
+    // reads
+    if (hdfsHasStreamCapability(jFile, IS_READ_BYTE_BUFFER_CAPABILITY)) {
+        file->flags |= HDFS_FILE_SUPPORTS_DIRECT_READ;
+    }
+
+    // Check the StreamCapabilities of jFile to see if we can do direct
+    // preads
+    if (hdfsHasStreamCapability(jFile, IS_PREAD_BYTE_BUFFER_CAPABILITY)) {
+        file->flags |= HDFS_FILE_SUPPORTS_DIRECT_PREAD;
+    }
+}
+
 static hdfsFile hdfsOpenFileImpl(hdfsFS fs, const char *path, int flags,
                   int32_t bufferSize, int16_t replication, int64_t blockSize)
 {
@@ -1018,7 +1105,7 @@ static hdfsFile hdfsOpenFileImpl(hdfsFS fs, const char *path, int flags,
        return f{is|os};
     */
     int accmode = flags & O_ACCMODE;
     jstring jStrBufferSize = NULL, jStrReplication = NULL;
     jobject jConfiguration = NULL, jPath = NULL, jFile = NULL;
     jobject jFS = (jobject)fs;
     jthrowable jthr;
@@ -1047,24 +1134,26 @@ static hdfsFile hdfsOpenFileImpl(hdfsFS fs, const char *path, int flags,
       errno = ENOTSUP;
       return NULL;
     } else {
-      fprintf(stderr, "ERROR: cannot open an hdfs file in mode 0x%x\n", accmode);
+      fprintf(stderr, "ERROR: cannot open an hdfs file in mode 0x%x\n",
+              accmode);
       errno = EINVAL;
       return NULL;
     }
 
     if ((flags & O_CREAT) && (flags & O_EXCL)) {
-      fprintf(stderr, "WARN: hdfs does not truly support O_CREATE && O_EXCL\n");
+      fprintf(stderr,
+              "WARN: hdfs does not truly support O_CREATE && O_EXCL\n");
     }
 
     if (accmode == O_RDONLY) {
-	method = "open";
-        signature = JMETHOD2(JPARAM(HADOOP_PATH), "I", JPARAM(HADOOP_ISTRM));
+        method = "open";
+        signature = JMETHOD2(JPARAM(HADOOP_PATH), "I", JPARAM(HADOOP_FSDISTRM));
     } else if (flags & O_APPEND) {
-	method = "append";
-	signature = JMETHOD1(JPARAM(HADOOP_PATH), JPARAM(HADOOP_OSTRM));
+        method = "append";
+        signature = JMETHOD1(JPARAM(HADOOP_PATH), JPARAM(HADOOP_FSDOSTRM));
     } else {
-	method = "create";
-	signature = JMETHOD2(JPARAM(HADOOP_PATH), "ZISJ", JPARAM(HADOOP_OSTRM));
+        method = "create";
+        signature = JMETHOD2(JPARAM(HADOOP_PATH), "ZISJ", JPARAM(HADOOP_FSDOSTRM));
     }
 
     /* Create an object of org.apache.hadoop.fs.Path */
@@ -1076,8 +1165,8 @@ static hdfsFile hdfsOpenFileImpl(hdfsFS fs, const char *path, int flags,
     }
 
     /* Get the Configuration object from the FileSystem object */
-    jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
-                     "getConf", JMETHOD1("", JPARAM(HADOOP_CONF)));
+    jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
+            "getConf", JMETHOD1("", JPARAM(HADOOP_CONF)));
     if (jthr) {
         ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
             "hdfsOpenFile(%s): FileSystem#getConf", path);
@@ -1085,7 +1174,7 @@ static hdfsFile hdfsOpenFileImpl(hdfsFS fs, const char *path, int flags,
     }
     jConfiguration = jVal.l;
 
-    jStrBufferSize = (*env)->NewStringUTF(env, "io.file.buffer.size"); 
+    jStrBufferSize = (*env)->NewStringUTF(env, "io.file.buffer.size");
     if (!jStrBufferSize) {
         ret = printPendingExceptionAndFree(env, PRINT_EXC_ALL, "OOM");
         goto done;
@@ -1097,9 +1186,9 @@ static hdfsFile hdfsOpenFileImpl(hdfsFS fs, const char *path, int flags,
     }
 
     if (!bufferSize) {
-        jthr = invokeMethod(env, &jVal, INSTANCE, jConfiguration, 
-                         HADOOP_CONF, "getInt", "(Ljava/lang/String;I)I",
-                         jStrBufferSize, 4096);
+        jthr = invokeMethod(env, &jVal, INSTANCE, jConfiguration,
+                JC_CONFIGURATION, "getInt",
+                "(Ljava/lang/String;I)I", jStrBufferSize, 4096);
         if (jthr) {
             ret = printExceptionAndFree(env, jthr, NOPRINT_EXC_FILE_NOT_FOUND |
                 NOPRINT_EXC_ACCESS_CONTROL | NOPRINT_EXC_UNRESOLVED_LINK,
@@ -1112,9 +1201,9 @@ static hdfsFile hdfsOpenFileImpl(hdfsFS fs, const char *path, int flags,
 
     if ((accmode == O_WRONLY) && (flags & O_APPEND) == 0) {
         if (!replication) {
-            jthr = invokeMethod(env, &jVal, INSTANCE, jConfiguration, 
-                             HADOOP_CONF, "getInt", "(Ljava/lang/String;I)I",
-                             jStrReplication, 1);
+            jthr = invokeMethod(env, &jVal, INSTANCE, jConfiguration,
+                             JC_CONFIGURATION, "getInt",
+                             "(Ljava/lang/String;I)I", jStrReplication, 1);
             if (jthr) {
                 ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
                     "hdfsOpenFile(%s): Configuration#getInt(dfs.replication)",
@@ -1124,18 +1213,18 @@ static hdfsFile hdfsOpenFileImpl(hdfsFS fs, const char *path, int flags,
             jReplication = (jshort)jVal.i;
         }
     }
- 
+
     /* Create and return either the FSDataInputStream or
        FSDataOutputStream references jobject jStream */
 
     // READ?
     if (accmode == O_RDONLY) {
-        jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
-                       method, signature, jPath, jBufferSize);
+        jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
+                method, signature, jPath, jBufferSize);
     }  else if ((accmode == O_WRONLY) && (flags & O_APPEND)) {
         // WRITE/APPEND?
-       jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
-                       method, signature, jPath);
+       jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
+               method, signature, jPath);
     } else {
         // WRITE/CREATE
         jboolean jOverWrite = 1;
@@ -1148,9 +1237,9 @@ static hdfsFile hdfsOpenFileImpl(hdfsFS fs, const char *path, int flags,
                 goto done;
             }
         }
-        jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
-                         method, signature, jPath, jOverWrite,
-                         jBufferSize, jReplication, jBlockSize);
+        jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
+                method, signature, jPath, jOverWrite, jBufferSize,
+                jReplication, jBlockSize);
     }
     if (jthr) {
         ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -1165,58 +1254,524 @@ static hdfsFile hdfsOpenFileImpl(hdfsFS fs, const char *path, int flags,
         ret = ENOMEM;
         goto done;
     }
-    file->file = (*env)->NewGlobalRef(env, jFile);
-    if (!file->file) {
-        ret = printPendingExceptionAndFree(env, PRINT_EXC_ALL,
-            "hdfsOpenFile(%s): NewGlobalRef", path); 
-        goto done;
+    file->file = (*env)->NewGlobalRef(env, jFile);
+    if (!file->file) {
+        ret = printPendingExceptionAndFree(env, PRINT_EXC_ALL,
+            "hdfsOpenFile(%s): NewGlobalRef", path);
+        goto done;
+    }
+    file->type = (((flags & O_WRONLY) == 0) ? HDFS_STREAM_INPUT :
+        HDFS_STREAM_OUTPUT);
+    file->flags = 0;
+
+    if ((flags & O_WRONLY) == 0) {
+        // Check the StreamCapabilities of jFile to see if we can do direct
+        // reads and/or direct preads
+        setFileFlagCapabilities(file, jFile);
+    }
+    ret = 0;
+
+done:
+    destroyLocalReference(env, jStrBufferSize);
+    destroyLocalReference(env, jStrReplication);
+    destroyLocalReference(env, jConfiguration);
+    destroyLocalReference(env, jPath);
+    destroyLocalReference(env, jFile);
+
+    if (ret) {
+        if (file) {
+            if (file->file) {
+                (*env)->DeleteGlobalRef(env, file->file);
+            }
+            free(file);
+        }
+        errno = ret;
+        return NULL;
+    }
+    return file;
+}
+
+hdfsFile hdfsStreamBuilderBuild(struct hdfsStreamBuilder *bld)
+{
+    hdfsFile file = hdfsOpenFileImpl(bld->fs, bld->path, bld->flags,
+                  bld->bufferSize, bld->replication, bld->defaultBlockSize);
+    int prevErrno = errno;
+    hdfsStreamBuilderFree(bld);
+    errno = prevErrno;
+    return file;
+}
+
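+/*
+ * Illustrative use of the stream-builder API above (a sketch; error
+ * handling elided):
+ *
+ *   struct hdfsStreamBuilder *bld =
+ *           hdfsStreamBuilderAlloc(fs, "/tmp/t", O_WRONLY);
+ *   hdfsStreamBuilderSetDefaultBlockSize(bld, 128 * 1024 * 1024);
+ *   hdfsFile f = hdfsStreamBuilderBuild(bld); // frees bld, even on failure
+ */
+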
+/**
+ * A wrapper around o.a.h.fs.FutureDataInputStreamBuilder and the file name
+ * associated with the builder.
+ */
+struct hdfsOpenFileBuilder {
+    jobject jBuilder;
+    const char *path;
+};
+
+/**
+ * A wrapper around a java.util.concurrent.Future (created by calling
+ * FutureDataInputStreamBuilder#build) and the file name associated with the
+ * builder.
+ */
+struct hdfsOpenFileFuture {
+    jobject jFuture;
+    const char *path;
+};
+
+hdfsOpenFileBuilder *hdfsOpenFileBuilderAlloc(hdfsFS fs,
+        const char *path) {
+    int ret = 0;
+    jthrowable jthr;
+    jvalue jVal;
+    jobject jFS = (jobject) fs;
+
+    jobject jPath = NULL;
+    jobject jBuilder = NULL;
+
+    JNIEnv *env = getJNIEnv();
+    if (!env) {
+        errno = EINTERNAL;
+        return NULL;
+    }
+
+    hdfsOpenFileBuilder *builder;
+    builder = calloc(1, sizeof(hdfsOpenFileBuilder));
+    if (!builder) {
+        fprintf(stderr, "hdfsOpenFileBuilderAlloc(%s): OOM when creating "
+                        "hdfsOpenFileBuilder\n", path);
+        errno = ENOMEM;
+        goto done;
+    }
+    builder->path = path;
+
+    jthr = constructNewObjectOfPath(env, path, &jPath);
+    if (jthr) {
+        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                "hdfsOpenFileBuilderAlloc(%s): constructNewObjectOfPath",
+                path);
+        goto done;
+    }
+
+    jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
+            "openFile", JMETHOD1(JPARAM(HADOOP_PATH), JPARAM(HADOOP_FDISB)),
+            jPath);
+    if (jthr) {
+        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                "hdfsOpenFileBuilderAlloc(%s): %s#openFile(Path) failed",
+                path, HADOOP_FS);
+        goto done;
+    }
+    jBuilder = jVal.l;
+
+    builder->jBuilder = (*env)->NewGlobalRef(env, jBuilder);
+    if (!builder->jBuilder) {
+        printPendingExceptionAndFree(env, PRINT_EXC_ALL,
+                "hdfsOpenFileBuilderAlloc(%s): NewGlobalRef(%s) failed", path,
+                HADOOP_FDISB);
+        ret = EINVAL;
+        goto done;
+    }
+
+done:
+    destroyLocalReference(env, jPath);
+    destroyLocalReference(env, jBuilder);
+    if (ret) {
+        if (builder) {
+            if (builder->jBuilder) {
+                (*env)->DeleteGlobalRef(env, builder->jBuilder);
+            }
+            free(builder);
+        }
+        errno = ret;
+        return NULL;
+    }
+    return builder;
+}
+
+/**
+ * Used internally by hdfsOpenFileBuilderWithOption to switch between
+ * FSBuilder#must and #opt.
+ */
+typedef enum { must, opt } openFileBuilderOptionType;
+
+/**
+ * Shared implementation of hdfsOpenFileBuilderMust and hdfsOpenFileBuilderOpt
+ * that calls FSBuilder#must or FSBuilder#opt depending on the value of
+ * openFileBuilderOptionType.
+ */
+static hdfsOpenFileBuilder *hdfsOpenFileBuilderWithOption(
+        hdfsOpenFileBuilder *builder, const char *key,
+        const char *value, openFileBuilderOptionType optionType) {
+    int ret = 0;
+    jthrowable jthr;
+    jvalue jVal;
+    jobject localJBuilder = NULL;
+    jobject globalJBuilder;
+    jstring jKeyString = NULL;
+    jstring jValueString = NULL;
+
+    // If the builder was not created by a prior call to
+    // hdfsOpenFileBuilderAlloc then exit
+    if (builder == NULL || builder->jBuilder == NULL) {
+        errno = EINVAL;
+        return NULL;
+    }
+
+    JNIEnv *env = getJNIEnv();
+    if (!env) {
+        errno = EINTERNAL;
+        return NULL;
+    }
+    jthr = newJavaStr(env, key, &jKeyString);
+    if (jthr) {
+        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                "hdfsOpenFileBuilderWithOption(%s): newJavaStr(%s)",
+                builder->path, key);
+        goto done;
+    }
+    jthr = newJavaStr(env, value, &jValueString);
+    if (jthr) {
+        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                "hdfsOpenFileBuilderWithOption(%s): newJavaStr(%s)",
+                builder->path, value);
+        goto done;
+    }
+
+    const char *optionTypeMethodName;
+    switch (optionType) {
+        case must:
+            optionTypeMethodName = "must";
+            break;
+        case opt:
+            optionTypeMethodName = "opt";
+            break;
+        default:
+            ret = EINTERNAL;
+            goto done;
+    }
+
+    jthr = invokeMethod(env, &jVal, INSTANCE, builder->jBuilder,
+            JC_FUTURE_DATA_IS_BUILDER, optionTypeMethodName,
+            JMETHOD2(JPARAM(JAVA_STRING), JPARAM(JAVA_STRING),
+                    JPARAM(HADOOP_FS_BLDR)), jKeyString,
+            jValueString);
+    if (jthr) {
+        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                "hdfsOpenFileBuilderWithOption(%s): %s#%s(%s, %s) failed",
+                builder->path, HADOOP_FS_BLDR, optionTypeMethodName, key,
+                value);
+        goto done;
+    }
+
+    localJBuilder = jVal.l;
+    globalJBuilder = (*env)->NewGlobalRef(env, localJBuilder);
+    if (!globalJBuilder) {
+        printPendingExceptionAndFree(env, PRINT_EXC_ALL,
+                "hdfsOpenFileBuilderWithOption(%s): NewGlobalRef(%s) failed",
+                builder->path, HADOOP_FDISB);
+        ret = EINVAL;
+        goto done;
+    }
+    (*env)->DeleteGlobalRef(env, builder->jBuilder);
+    builder->jBuilder = globalJBuilder;
+
+done:
+    destroyLocalReference(env, jKeyString);
+    destroyLocalReference(env, jValueString);
+    destroyLocalReference(env, localJBuilder);
+    if (ret) {
+        errno = ret;
+        return NULL;
+    }
+    return builder;
+}
+
+hdfsOpenFileBuilder *hdfsOpenFileBuilderMust(hdfsOpenFileBuilder *builder,
+        const char *key, const char *value) {
+    openFileBuilderOptionType optionType;
+    optionType = must;
+    return hdfsOpenFileBuilderWithOption(builder, key, value, optionType);
+}
+
+hdfsOpenFileBuilder *hdfsOpenFileBuilderOpt(hdfsOpenFileBuilder *builder,
+        const char *key, const char *value) {
+    openFileBuilderOptionType optionType;
+    optionType = opt;
+    return hdfsOpenFileBuilderWithOption(builder, key, value, optionType);
+}
+
+hdfsOpenFileFuture *hdfsOpenFileBuilderBuild(hdfsOpenFileBuilder *builder) {
+    int ret = 0;
+    jthrowable jthr;
+    jvalue jVal;
+
+    jobject jFuture = NULL;
+
+    // If the builder was not created by a prior call to
+    // hdfsOpenFileBuilderAlloc then exit
+    if (builder == NULL || builder->jBuilder == NULL) {
+        errno = EINVAL;
+        return NULL;
+    }
+
+    JNIEnv *env = getJNIEnv();
+    if (!env) {
+        errno = EINTERNAL;
+        return NULL;
+    }
+
+    hdfsOpenFileFuture *future;
+    future = calloc(1, sizeof(hdfsOpenFileFuture));
+    if (!future) {
+        fprintf(stderr, "hdfsOpenFileBuilderBuild: OOM when creating "
+                        "hdfsOpenFileFuture\n");
+        errno = ENOMEM;
+        goto done;
+    }
+    future->path = builder->path;
+
+    jthr = invokeMethod(env, &jVal, INSTANCE, builder->jBuilder,
+            JC_FUTURE_DATA_IS_BUILDER, "build",
+            JMETHOD1("", JPARAM(JAVA_CFUTURE)));
+    if (jthr) {
+        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                "hdfsOpenFileBuilderBuild(%s): %s#build() failed",
+                builder->path, HADOOP_FDISB);
+        goto done;
+    }
+    jFuture = jVal.l;
+
+    future->jFuture = (*env)->NewGlobalRef(env, jFuture);
+    if (!future->jFuture) {
+        printPendingExceptionAndFree(env, PRINT_EXC_ALL,
+                "hdfsOpenFileBuilderBuild(%s): NewGlobalRef(%s) failed",
+                builder->path, JAVA_CFUTURE);
+        ret = EINVAL;
+        goto done;
+    }
+
+done:
+    destroyLocalReference(env, jFuture);
+    if (ret) {
+        if (future) {
+            if (future->jFuture) {
+                (*env)->DeleteGlobalRef(env, future->jFuture);
+            }
+            free(future);
+        }
+        hdfsOpenFileBuilderFree(builder);
+        errno = ret;
+        return NULL;
+    }
+    hdfsOpenFileBuilderFree(builder);
+    return future;
+}
+
+void hdfsOpenFileBuilderFree(hdfsOpenFileBuilder *builder) {
+    JNIEnv *env;
+    env = getJNIEnv();
+    if (!env) {
+        return;
+    }
+    if (builder->jBuilder) {
+        (*env)->DeleteGlobalRef(env, builder->jBuilder);
+        builder->jBuilder = NULL;
+    }
+    free(builder);
+}
+
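+/*
+ * Illustrative end-to-end use of the openFile builder API (a sketch; the
+ * option key shown is only an example and is not interpreted by this code):
+ *
+ *   hdfsOpenFileBuilder *bld = hdfsOpenFileBuilderAlloc(fs, "/tmp/t");
+ *   hdfsOpenFileBuilderOpt(bld, "fs.option.openfile.read.policy", "random");
+ *   hdfsOpenFileFuture *fut = hdfsOpenFileBuilderBuild(bld); // frees bld
+ *   hdfsFile f = hdfsOpenFileFutureGet(fut);
+ *   hdfsOpenFileFutureFree(fut);
+ */
+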
+/**
+ * Shared implementation of hdfsOpenFileFutureGet and
+ * hdfsOpenFileFutureGetWithTimeout. If a timeout is specified it calls
+ * Future#get(long, TimeUnit), otherwise it calls Future#get().
+ */
+static hdfsFile fileFutureGetWithTimeout(hdfsOpenFileFuture *future,
+        int64_t timeout, jobject jTimeUnit) {
+    int ret = 0;
+    jthrowable jthr;
+    jvalue jVal;
+
+    hdfsFile file = NULL;
+    jobject jFile = NULL;
+
+    JNIEnv *env = getJNIEnv();
+    if (!env) {
+        errno = EINTERNAL;
+        return NULL;
+    }
+
+    if (!jTimeUnit) {
+        jthr = invokeMethod(env, &jVal, INSTANCE, future->jFuture,
+                JC_CFUTURE, "get", JMETHOD1("", JPARAM(JAVA_OBJECT)));
+    } else {
+        jthr = invokeMethod(env, &jVal, INSTANCE, future->jFuture,
+                JC_CFUTURE, "get", JMETHOD2("J",
+                        JPARAM(JAVA_TIMEUNIT), JPARAM(JAVA_OBJECT)), timeout,
+                        jTimeUnit);
+    }
+    if (jthr) {
+        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                "hdfsOpenFileFutureGet(%s): %s#get failed", future->path,
+                JAVA_CFUTURE);
+        goto done;
+    }
+
+    file = calloc(1, sizeof(struct hdfsFile_internal));
+    if (!file) {
+        fprintf(stderr, "hdfsOpenFileFutureGet(%s): OOM when creating "
+                        "hdfsFile\n", future->path);
+        ret = ENOMEM;
+        goto done;
+    }
+    jFile = jVal.l;
+    file->file = (*env)->NewGlobalRef(env, jFile);
+    if (!file->file) {
+        ret = printPendingExceptionAndFree(env, PRINT_EXC_ALL,
+                "hdfsOpenFileFutureGet(%s): NewGlobalRef(jFile) failed",
+                future->path);
+        goto done;
+    }
+
+    file->type = HDFS_STREAM_INPUT;
+    file->flags = 0;
+
+    setFileFlagCapabilities(file, jFile);
+
+done:
+    destroyLocalReference(env, jTimeUnit);
+    destroyLocalReference(env, jFile);
+    if (ret) {
+        if (file) {
+            if (file->file) {
+                (*env)->DeleteGlobalRef(env, file->file);
+            }
+            free(file);
+        }
+        errno = ret;
+        return NULL;
+    }
+    return file;
+}
+
+hdfsFile hdfsOpenFileFutureGet(hdfsOpenFileFuture *future) {
+    return fileFutureGetWithTimeout(future, -1, NULL);
+}
+
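+/*
+ * Example (sketch): bound the wait for the open to complete; on timeout
+ * the call returns NULL with errno set by the exception handler:
+ *
+ *   hdfsFile f = hdfsOpenFileFutureGetWithTimeout(fut, 30, jSeconds);
+ */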
+hdfsFile hdfsOpenFileFutureGetWithTimeout(hdfsOpenFileFuture *future,
+        int64_t timeout, javaConcurrentTimeUnit timeUnit) {
+    int ret = 0;
+    jthrowable jthr;
+    jobject jTimeUnit = NULL;
+
+    JNIEnv *env = getJNIEnv();
+    if (!env) {
+        errno = EINTERNAL;
+        return NULL;
+    }
+
+    const char *timeUnitEnumName;
+    switch (timeUnit) {
+        case jNanoseconds:
+            timeUnitEnumName = "NANOSECONDS";
+            break;
+        case jMicroseconds:
+            timeUnitEnumName = "MICROSECONDS";
+            break;
+        case jMilliseconds:
+            timeUnitEnumName = "MILLISECONDS";
+            break;
+        case jSeconds:
+            timeUnitEnumName = "SECONDS";
+            break;
+        case jMinutes:
+            timeUnitEnumName = "MINUTES";
+            break;
+        case jHours:
+            timeUnitEnumName = "HOURS";
+            break;
+        case jDays:
+            timeUnitEnumName = "DAYS";
+            break;
+        default:
+            ret = EINTERNAL;
+            goto done;
+    }
+
+    jthr = fetchEnumInstance(env, JAVA_TIMEUNIT, timeUnitEnumName, &jTimeUnit);
+    if (jthr) {
+        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                "hdfsOpenFileFutureGetWithTimeout(%s): fetchEnumInstance(%s)",
+                future->path, JAVA_TIMEUNIT);
+        goto done;
+    }
+    return fileFutureGetWithTimeout(future, timeout, jTimeUnit);
+
+done:
+    if (ret) {
+        errno = ret;
     }
-    file->type = (((flags & O_WRONLY) == 0) ? HDFS_STREAM_INPUT :
-        HDFS_STREAM_OUTPUT);
-    file->flags = 0;
+    return NULL;
+}
 
-    if ((flags & O_WRONLY) == 0) {
-        // Try a test read to see if we can do direct reads
-        char buf;
-        if (readDirect(fs, file, &buf, 0) == 0) {
-            // Success - 0-byte read should return 0
-            file->flags |= HDFS_FILE_SUPPORTS_DIRECT_READ;
-        } else if (errno != ENOTSUP) {
-            // Unexpected error. Clear it, don't set the direct flag.
-            fprintf(stderr,
-                  "hdfsOpenFile(%s): WARN: Unexpected error %d when testing "
-                  "for direct read compatibility\n", path, errno);
-        }
+int hdfsOpenFileFutureCancel(hdfsOpenFileFuture *future,
+        int mayInterruptIfRunning) {
+    int ret = 0;
+    jthrowable jthr;
+    jvalue jVal;
+
+    jboolean jMayInterruptIfRunning;
+
+    JNIEnv *env = getJNIEnv();
+    if (!env) {
+        errno = EINTERNAL;
+        return -1;
+    }
+
+    jMayInterruptIfRunning = mayInterruptIfRunning ? JNI_TRUE : JNI_FALSE;
+    jthr = invokeMethod(env, &jVal, INSTANCE, future->jFuture, JC_CFUTURE,
+            "cancel", JMETHOD1("Z", "Z"), jMayInterruptIfRunning);
+    if (jthr) {
+        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                "hdfsOpenFileFutureCancel(%s): %s#cancel failed", future->path,
+                JAVA_CFUTURE);
+        goto done;
     }
-    ret = 0;
 
 done:
-    destroyLocalReference(env, jStrBufferSize);
-    destroyLocalReference(env, jStrReplication);
-    destroyLocalReference(env, jConfiguration); 
-    destroyLocalReference(env, jPath); 
-    destroyLocalReference(env, jFile); 
     if (ret) {
-        if (file) {
-            if (file->file) {
-                (*env)->DeleteGlobalRef(env, file->file);
-            }
-            free(file);
-        }
         errno = ret;
-        return NULL;
+        return -1;
     }
-    return file;
+    if (!jVal.z) {
+        return -1;
+    }
+    return 0;
 }
 
-hdfsFile hdfsStreamBuilderBuild(struct hdfsStreamBuilder *bld)
-{
-    hdfsFile file = hdfsOpenFileImpl(bld->fs, bld->path, bld->flags,
-                  bld->bufferSize, bld->replication, bld->defaultBlockSize);
-    int prevErrno = errno;
-    hdfsStreamBuilderFree(bld);
-    errno = prevErrno;
-    return file;
+void hdfsOpenFileFutureFree(hdfsOpenFileFuture *future) {
+    JNIEnv *env;
+    env = getJNIEnv();
+    if (!env) {
+        return;
+    }
+    if (future->jFuture) {
+        (*env)->DeleteGlobalRef(env, future->jFuture);
+        future->jFuture = NULL;
+    }
+    free(future);
 }
 
 int hdfsTruncateFile(hdfsFS fs, const char* path, tOffset newlength)
@@ -1241,9 +1796,9 @@ int hdfsTruncateFile(hdfsFS fs, const char* path, tOffset newlength)
         return -1;
     }
 
-    jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
-                        "truncate", JMETHOD2(JPARAM(HADOOP_PATH), "J", "Z"),
-                        jPath, newlength);
+    jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
+            "truncate", JMETHOD2(JPARAM(HADOOP_PATH), "J", "Z"),
+            jPath, newlength);
     destroyLocalReference(env, jPath);
     if (jthr) {
         errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -1270,11 +1825,11 @@ int hdfsUnbufferFile(hdfsFile file)
         ret = ENOTSUP;
         goto done;
     }
-    jthr = invokeMethod(env, NULL, INSTANCE, file->file, HADOOP_ISTRM,
-                     "unbuffer", "()V");
+    jthr = invokeMethod(env, NULL, INSTANCE, file->file,
+            JC_FS_DATA_INPUT_STREAM, "unbuffer", "()V");
     if (jthr) {
         ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                HADOOP_ISTRM "#unbuffer failed:");
+                HADOOP_FSDISTRM "#unbuffer failed:");
         goto done;
     }
     ret = 0;
@@ -1288,10 +1843,10 @@ int hdfsCloseFile(hdfsFS fs, hdfsFile file)
 {
     int ret;
     // JAVA EQUIVALENT:
-    //  file.close 
+    //  file.close
 
-    //The interface whose 'close' method to be called
+    //The class whose 'close' method is to be called
-    const char *interface;
+    CachedJavaClass cachedJavaClass;
     const char *interfaceShortName;
 
     //Caught exception
@@ -1310,13 +1865,16 @@ int hdfsCloseFile(hdfsFS fs, hdfsFile file)
         return -1;
     }
 
-    interface = (file->type == HDFS_STREAM_INPUT) ?
-        HADOOP_ISTRM : HADOOP_OSTRM;
-  
-    jthr = invokeMethod(env, NULL, INSTANCE, file->file, interface,
-                     "close", "()V");
+    if (file->type == HDFS_STREAM_INPUT) {
+        cachedJavaClass = JC_FS_DATA_INPUT_STREAM;
+    } else {
+        cachedJavaClass = JC_FS_DATA_OUTPUT_STREAM;
+    }
+
+    jthr = invokeMethod(env, NULL, INSTANCE, file->file,
+            cachedJavaClass, "close", "()V");
     if (jthr) {
-        interfaceShortName = (file->type == HDFS_STREAM_INPUT) ? 
+        interfaceShortName = (file->type == HDFS_STREAM_INPUT) ?
             "FSDataInputStream" : "FSDataOutputStream";
         ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
                 "%s#close", interfaceShortName);
@@ -1347,7 +1905,7 @@ int hdfsExists(hdfsFS fs, const char *path)
         errno = EINTERNAL;
         return -1;
     }
-    
+
     if (path == NULL) {
         errno = EINVAL;
         return -1;
@@ -1358,7 +1916,7 @@ int hdfsExists(hdfsFS fs, const char *path)
             "hdfsExists: constructNewObjectOfPath");
         return -1;
     }
-    jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
+    jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
             "exists", JMETHOD1(JPARAM(HADOOP_PATH), "Z"), jPath);
     destroyLocalReference(env, jPath);
     if (jthr) {
@@ -1397,11 +1955,17 @@ static int readPrepare(JNIEnv* env, hdfsFS fs, hdfsFile f,
     return 0;
 }
 
+/**
+ * If the underlying stream supports the ByteBufferReadable interface then
+ * this method will transparently use read(ByteBuffer). This can help
+ * improve performance as it avoids unnecessarily copying data onto the Java
+ * heap. Instead, the data is copied directly from kernel space to the C
+ * heap.
+ */
 tSize hdfsRead(hdfsFS fs, hdfsFile f, void* buffer, tSize length)
 {
     jobject jInputStream;
     jbyteArray jbRarray;
-    jint noReadBytes = length;
     jvalue jVal;
     jthrowable jthr;
     JNIEnv* env;
@@ -1440,8 +2004,8 @@ tSize hdfsRead(hdfsFS fs, hdfsFile f, void* buffer, tSize length)
         return -1;
     }
 
-    jthr = invokeMethod(env, &jVal, INSTANCE, jInputStream, HADOOP_ISTRM,
-                               "read", "([B)I", jbRarray);
+    jthr = invokeMethod(env, &jVal, INSTANCE, jInputStream,
+            JC_FS_DATA_INPUT_STREAM, "read", "([B)I", jbRarray);
     if (jthr) {
         destroyLocalReference(env, jbRarray);
         errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -1457,7 +2021,12 @@ tSize hdfsRead(hdfsFS fs, hdfsFile f, void* buffer, tSize length)
         errno = EINTR;
         return -1;
     }
-    (*env)->GetByteArrayRegion(env, jbRarray, 0, noReadBytes, buffer);
+    // We only copy the portion of the jbRarray that was actually filled by
+    // the call to FSDataInputStream#read; #read is not guaranteed to fill the
+    // entire buffer, instead it returns the number of bytes read into the
+    // buffer; we use the return value as the length in GetByteArrayRegion to
+    // ensure we don't copy more bytes than necessary
+    (*env)->GetByteArrayRegion(env, jbRarray, 0, jVal.i, buffer);
     destroyLocalReference(env, jbRarray);
     if ((*env)->ExceptionCheck(env)) {
         errno = printPendingExceptionAndFree(env, PRINT_EXC_ALL,
@@ -1467,12 +2036,11 @@ tSize hdfsRead(hdfsFS fs, hdfsFile f, void* buffer, tSize length)
     return jVal.i;
 }
 
-// Reads using the read(ByteBuffer) API, which does fewer copies
 tSize readDirect(hdfsFS fs, hdfsFile f, void* buffer, tSize length)
 {
     // JAVA EQUIVALENT:
-    //  ByteBuffer bbuffer = ByteBuffer.allocateDirect(length) // wraps C buffer
-    //  fis.read(bbuffer);
+    //  ByteBuffer buf = ByteBuffer.allocateDirect(length) // wraps C buffer
+    //  fis.read(buf);
 
     jobject jInputStream;
     jvalue jVal;
@@ -1499,16 +2067,33 @@ tSize readDirect(hdfsFS fs, hdfsFile f, void* buffer, tSize length)
     }
 
     jthr = invokeMethod(env, &jVal, INSTANCE, jInputStream,
-        HADOOP_ISTRM, "read", "(Ljava/nio/ByteBuffer;)I", bb);
+            JC_FS_DATA_INPUT_STREAM, "read",
+            "(Ljava/nio/ByteBuffer;)I", bb);
     destroyLocalReference(env, bb);
     if (jthr) {
         errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
             "readDirect: FSDataInputStream#read");
         return -1;
     }
-    return (jVal.i < 0) ? 0 : jVal.i;
+    // Reached EOF, return 0
+    if (jVal.i < 0) {
+        return 0;
+    }
+    // 0 bytes read, return error
+    if (jVal.i == 0) {
+        errno = EINTR;
+        return -1;
+    }
+    return jVal.i;
 }
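+
+/*
+ * Caller-side sketch of the read contract above: a return of 0 means EOF,
+ * while -1 with errno == EINTR means this call returned no bytes and the
+ * read should be retried:
+ *
+ *   tSize n;
+ *   do {
+ *       n = hdfsRead(fs, f, buf, bufLen);
+ *   } while (n == -1 && errno == EINTR);
+ *   // n == 0 here means end of file
+ */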
 
+/**
+ * If the underlying stream supports the ByteBufferPositionedReadable
+ * interface then this method will transparently use read(long, ByteBuffer).
+ * This can help improve performance as it avoids unnecessarily copying data
+ * onto the Java heap. Instead, the data is copied directly from kernel
+ * space to the C heap.
+ */
 tSize hdfsPread(hdfsFS fs, hdfsFile f, tOffset position,
                 void* buffer, tSize length)
 {
@@ -1528,6 +2113,10 @@ tSize hdfsPread(hdfsFS fs, hdfsFile f, tOffset position,
         return -1;
     }
 
+    if (f->flags & HDFS_FILE_SUPPORTS_DIRECT_PREAD) {
+        return preadDirect(fs, f, position, buffer, length);
+    }
+
     env = getJNIEnv();
     if (env == NULL) {
       errno = EINTERNAL;
@@ -1550,8 +2139,10 @@ tSize hdfsPread(hdfsFS fs, hdfsFile f, tOffset position,
             "hdfsPread: NewByteArray");
         return -1;
     }
-    jthr = invokeMethod(env, &jVal, INSTANCE, f->file, HADOOP_ISTRM,
-                     "read", "(J[BII)I", position, jbRarray, 0, length);
+
+    jthr = invokeMethod(env, &jVal, INSTANCE, f->file,
+            JC_FS_DATA_INPUT_STREAM, "read", "(J[BII)I", position,
+            jbRarray, 0, length);
     if (jthr) {
         destroyLocalReference(env, jbRarray);
         errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -1577,6 +2168,173 @@ tSize hdfsPread(hdfsFS fs, hdfsFile f, tOffset position,
     return jVal.i;
 }
 
+tSize preadDirect(hdfsFS fs, hdfsFile f, tOffset position, void* buffer,
+                  tSize length)
+{
+    // JAVA EQUIVALENT:
+    //  ByteBuffer buf = ByteBuffer.allocateDirect(length) // wraps C buffer
+    //  fis.read(position, buf);
+
+    jvalue jVal;
+    jthrowable jthr;
+    jobject bb;
+
+    //Get the JNIEnv* corresponding to current thread
+    JNIEnv* env = getJNIEnv();
+    if (env == NULL) {
+      errno = EINTERNAL;
+      return -1;
+    }
+
+    //Error checking... make sure that this file is 'readable'
+    if (f->type != HDFS_STREAM_INPUT) {
+        fprintf(stderr, "Cannot read from a non-InputStream object!\n");
+        errno = EINVAL;
+        return -1;
+    }
+
+    //Read the requisite bytes
+    bb = (*env)->NewDirectByteBuffer(env, buffer, length);
+    if (bb == NULL) {
+        errno = printPendingExceptionAndFree(env, PRINT_EXC_ALL,
+            "readDirect: NewDirectByteBuffer");
+        return -1;
+    }
+
+    jthr = invokeMethod(env, &jVal, INSTANCE, f->file,
+            JC_FS_DATA_INPUT_STREAM, "read", "(JLjava/nio/ByteBuffer;)I",
+            position, bb);
+    destroyLocalReference(env, bb);
+    if (jthr) {
+        errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+            "preadDirect: FSDataInputStream#read");
+        return -1;
+    }
+    // Reached EOF, return 0
+    if (jVal.i < 0) {
+        return 0;
+    }
+    // 0 bytes read, return error
+    if (jVal.i == 0) {
+        errno = EINTR;
+        return -1;
+    }
+    return jVal.i;
+}
+
+/**
+ * Like hdfsPread, if the underlying stream supports the
+ * ByteBufferPositionedReadable interface then this method will transparently
+ * use readFully(long, ByteBuffer).
+ */
+int hdfsPreadFully(hdfsFS fs, hdfsFile f, tOffset position,
+                void* buffer, tSize length) {
+    JNIEnv* env;
+    jbyteArray jbRarray;
+    jthrowable jthr;
+
+    if (length == 0) {
+        return 0;
+    } else if (length < 0) {
+        errno = EINVAL;
+        return -1;
+    }
+    if (!f || f->type == HDFS_STREAM_UNINITIALIZED) {
+        errno = EBADF;
+        return -1;
+    }
+
+    if (f->flags & HDFS_FILE_SUPPORTS_DIRECT_PREAD) {
+        return preadFullyDirect(fs, f, position, buffer, length);
+    }
+
+    env = getJNIEnv();
+    if (env == NULL) {
+        errno = EINTERNAL;
+        return -1;
+    }
+
+    //Error checking... make sure that this file is 'readable'
+    if (f->type != HDFS_STREAM_INPUT) {
+        fprintf(stderr, "Cannot read from a non-InputStream object!\n");
+        errno = EINVAL;
+        return -1;
+    }
+
+    // JAVA EQUIVALENT:
+    //  byte [] bR = new byte[length];
+    //  fis.readFully(pos, bR, 0, length);
+    jbRarray = (*env)->NewByteArray(env, length);
+    if (!jbRarray) {
+        errno = printPendingExceptionAndFree(env, PRINT_EXC_ALL,
+                                             "hdfsPreadFully: NewByteArray");
+        return -1;
+    }
+
+    jthr = invokeMethod(env, NULL, INSTANCE, f->file,
+                        JC_FS_DATA_INPUT_STREAM, "readFully", "(J[BII)V",
+                        position, jbRarray, 0, length);
+    if (jthr) {
+        destroyLocalReference(env, jbRarray);
+        errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                "hdfsPreadFully: FSDataInputStream#readFully");
+        return -1;
+    }
+
+    (*env)->GetByteArrayRegion(env, jbRarray, 0, length, buffer);
+    destroyLocalReference(env, jbRarray);
+    if ((*env)->ExceptionCheck(env)) {
+        errno = printPendingExceptionAndFree(env, PRINT_EXC_ALL,
+                "hdfsPread: GetByteArrayRegion");
+        return -1;
+    }
+    return 0;
+}
+
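+/*
+ * Behavioural contrast between the two positioned-read entry points (a
+ * sketch): hdfsPread may return fewer bytes than requested, while
+ * hdfsPreadFully either fills the whole range or fails:
+ *
+ *   tSize n = hdfsPread(fs, f, off, buf, len);      // 0 <= n <= len, or -1
+ *   int rc  = hdfsPreadFully(fs, f, off, buf, len); // 0, or -1 with errno
+ */
+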
+int preadFullyDirect(hdfsFS fs, hdfsFile f, tOffset position, void* buffer,
+                  tSize length)
+{
+    // JAVA EQUIVALENT:
+    //  ByteBuffer buf = ByteBuffer.allocateDirect(length) // wraps C buffer
+    //  fis.readFully(position, buf);
+
+    jthrowable jthr;
+    jobject bb;
+
+    //Get the JNIEnv* corresponding to current thread
+    JNIEnv* env = getJNIEnv();
+    if (env == NULL) {
+        errno = EINTERNAL;
+        return -1;
+    }
+
+    //Error checking... make sure that this file is 'readable'
+    if (f->type != HDFS_STREAM_INPUT) {
+        fprintf(stderr, "Cannot read from a non-InputStream object!\n");
+        errno = EINVAL;
+        return -1;
+    }
+
+    //Read the requisite bytes
+    bb = (*env)->NewDirectByteBuffer(env, buffer, length);
+    if (bb == NULL) {
+        errno = printPendingExceptionAndFree(env, PRINT_EXC_ALL,
+                "readDirect: NewDirectByteBuffer");
+        return -1;
+    }
+
+    jthr = invokeMethod(env, NULL, INSTANCE, f->file,
+            JC_FS_DATA_INPUT_STREAM, "readFully",
+            "(JLjava/nio/ByteBuffer;)V", position, bb);
+    destroyLocalReference(env, bb);
+    if (jthr) {
+        errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                "preadDirect: FSDataInputStream#read");
+        return -1;
+    }
+    return 0;
+}
+
 tSize hdfsWrite(hdfsFS fs, hdfsFile f, const void* buffer, tSize length)
 {
     // JAVA EQUIVALENT
@@ -1601,7 +2359,7 @@ tSize hdfsWrite(hdfsFS fs, hdfsFile f, const void* buffer, tSize length)
     }
 
     jOutputStream = f->file;
-    
+
     if (length < 0) {
     	errno = EINVAL;
     	return -1;
@@ -1636,7 +2394,8 @@ tSize hdfsWrite(hdfsFS fs, hdfsFile f, const void* buffer, tSize length)
         return -1;
     }
     jthr = invokeMethod(env, NULL, INSTANCE, jOutputStream,
-            HADOOP_OSTRM, "write", "([B)V", jbWarray);
+            JC_FS_DATA_OUTPUT_STREAM, "write", "([B)V",
+            jbWarray);
     destroyLocalReference(env, jbWarray);
     if (jthr) {
         errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -1648,7 +2407,7 @@ tSize hdfsWrite(hdfsFS fs, hdfsFile f, const void* buffer, tSize length)
     return length;
 }
 
-int hdfsSeek(hdfsFS fs, hdfsFile f, tOffset desiredPos) 
+int hdfsSeek(hdfsFS fs, hdfsFile f, tOffset desiredPos)
 {
     // JAVA EQUIVALENT
     //  fis.seek(pos);
@@ -1671,7 +2430,7 @@ int hdfsSeek(hdfsFS fs, hdfsFile f, tOffset desiredPos)
 
     jInputStream = f->file;
     jthr = invokeMethod(env, NULL, INSTANCE, jInputStream,
-            HADOOP_ISTRM, "seek", "(J)V", desiredPos);
+            JC_FS_DATA_INPUT_STREAM, "seek", "(J)V", desiredPos);
     if (jthr) {
         errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
             "hdfsSeek(desiredPos=%" PRId64 ")"
@@ -1681,15 +2440,13 @@ int hdfsSeek(hdfsFS fs, hdfsFile f, tOffset desiredPos)
     return 0;
 }
 
-
-
 tOffset hdfsTell(hdfsFS fs, hdfsFile f)
 {
     // JAVA EQUIVALENT
     //  pos = f.getPos();
 
     jobject jStream;
-    const char *interface;
+    CachedJavaClass cachedJavaClass;
     jvalue jVal;
     jthrowable jthr;
 
@@ -1708,10 +2465,13 @@ tOffset hdfsTell(hdfsFS fs, hdfsFile f)
 
     //Parameters
     jStream = f->file;
-    interface = (f->type == HDFS_STREAM_INPUT) ?
-        HADOOP_ISTRM : HADOOP_OSTRM;
+    if (f->type == HDFS_STREAM_INPUT) {
+        cachedJavaClass = JC_FS_DATA_INPUT_STREAM;
+    } else {
+        cachedJavaClass = JC_FS_DATA_OUTPUT_STREAM;
+    }
     jthr = invokeMethod(env, &jVal, INSTANCE, jStream,
-                     interface, "getPos", "()J");
+            cachedJavaClass, "getPos", "()J");
     if (jthr) {
         errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
             "hdfsTell: %s#getPos",
@@ -1722,7 +2482,7 @@ tOffset hdfsTell(hdfsFS fs, hdfsFile f)
     return jVal.j;
 }
 
-int hdfsFlush(hdfsFS fs, hdfsFile f) 
+int hdfsFlush(hdfsFS fs, hdfsFile f)
 {
     // JAVA EQUIVALENT
     //  fos.flush();
@@ -1742,7 +2502,7 @@ int hdfsFlush(hdfsFS fs, hdfsFile f)
         return -1;
     }
     jthr = invokeMethod(env, NULL, INSTANCE, f->file,
-                     HADOOP_OSTRM, "flush", "()V");
+            JC_FS_DATA_OUTPUT_STREAM, "flush", "()V");
     if (jthr) {
         errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
             "hdfsFlush: FSDataInputStream#flush");
@@ -1771,7 +2531,7 @@ int hdfsHFlush(hdfsFS fs, hdfsFile f)
 
     jOutputStream = f->file;
     jthr = invokeMethod(env, NULL, INSTANCE, jOutputStream,
-                     HADOOP_OSTRM, "hflush", "()V");
+            JC_FS_DATA_OUTPUT_STREAM, "hflush", "()V");
     if (jthr) {
         errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
             "hdfsHFlush: FSDataOutputStream#hflush");
@@ -1800,7 +2560,7 @@ int hdfsHSync(hdfsFS fs, hdfsFile f)
 
     jOutputStream = f->file;
     jthr = invokeMethod(env, NULL, INSTANCE, jOutputStream,
-                     HADOOP_OSTRM, "hsync", "()V");
+            JC_FS_DATA_OUTPUT_STREAM, "hsync", "()V");
     if (jthr) {
         errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
             "hdfsHSync: FSDataOutputStream#hsync");
@@ -1834,7 +2594,7 @@ int hdfsAvailable(hdfsFS fs, hdfsFile f)
     //Parameters
     jInputStream = f->file;
     jthr = invokeMethod(env, &jVal, INSTANCE, jInputStream,
-                     HADOOP_ISTRM, "available", "()I");
+            JC_FS_DATA_INPUT_STREAM, "available", "()I");
     if (jthr) {
         errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
             "hdfsAvailable: FSDataInputStream#available");
@@ -1879,8 +2639,8 @@ static int hdfsCopyImpl(hdfsFS srcFS, const char *src, hdfsFS dstFS,
     }
 
     //Create the org.apache.hadoop.conf.Configuration object
-    jthr = constructNewObjectOfClass(env, &jConfiguration,
-                                     HADOOP_CONF, "()V");
+    jthr = constructNewObjectOfCachedClass(env, &jConfiguration,
+            JC_CONFIGURATION, "()V");
     if (jthr) {
         ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
             "hdfsCopyImpl: Configuration constructor");
@@ -1888,12 +2648,12 @@ static int hdfsCopyImpl(hdfsFS srcFS, const char *src, hdfsFS dstFS,
     }
 
     //FileUtil#copy
-    jthr = invokeMethod(env, &jVal, STATIC,
-            NULL, "org/apache/hadoop/fs/FileUtil", "copy",
+    jthr = invokeMethod(env, &jVal, STATIC, NULL, JC_FILE_UTIL,
+            "copy",
             "(Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;"
             "Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;"
             "ZLorg/apache/hadoop/conf/Configuration;)Z",
-            jSrcFS, jSrcPath, jDstFS, jDstPath, deleteSource, 
+            jSrcFS, jSrcPath, jDstFS, jDstPath, deleteSource,
             jConfiguration);
     if (jthr) {
         ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -1911,7 +2671,7 @@ static int hdfsCopyImpl(hdfsFS srcFS, const char *src, hdfsFS dstFS,
     destroyLocalReference(env, jConfiguration);
     destroyLocalReference(env, jSrcPath);
     destroyLocalReference(env, jDstPath);
-  
+
     if (ret) {
         errno = ret;
         return -1;
@@ -1955,9 +2715,9 @@ int hdfsDelete(hdfsFS fs, const char *path, int recursive)
         return -1;
     }
     jRecursive = recursive ? JNI_TRUE : JNI_FALSE;
-    jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
-                     "delete", "(Lorg/apache/hadoop/fs/Path;Z)Z",
-                     jPath, jRecursive);
+    jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
+            "delete", "(Lorg/apache/hadoop/fs/Path;Z)Z", jPath,
+            jRecursive);
     destroyLocalReference(env, jPath);
     if (jthr) {
         errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -2009,9 +2769,9 @@ int hdfsRename(hdfsFS fs, const char *oldPath, const char *newPath)
 
     // Rename the file
     // TODO: use rename2 here?  (See HDFS-3592)
-    jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS, "rename",
-                     JMETHOD2(JPARAM(HADOOP_PATH), JPARAM(HADOOP_PATH), "Z"),
-                     jOldPath, jNewPath);
+    jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
+            "rename", JMETHOD2(JPARAM(HADOOP_PATH), JPARAM
+            (HADOOP_PATH), "Z"), jOldPath, jNewPath);
     if (jthr) {
         errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
             "hdfsRename(oldPath=%s, newPath=%s): FileSystem#rename",
@@ -2035,7 +2795,7 @@ int hdfsRename(hdfsFS fs, const char *oldPath, const char *newPath)
 char* hdfsGetWorkingDirectory(hdfsFS fs, char* buffer, size_t bufferSize)
 {
     // JAVA EQUIVALENT:
-    //  Path p = fs.getWorkingDirectory(); 
+    //  Path p = fs.getWorkingDirectory();
     //  return p.toString()
 
     jobject jPath = NULL;
@@ -2054,9 +2814,8 @@ char* hdfsGetWorkingDirectory(hdfsFS fs, char* buffer, size_t bufferSize)
     }
 
     //FileSystem#getWorkingDirectory()
-    jthr = invokeMethod(env, &jVal, INSTANCE, jFS,
-                     HADOOP_FS, "getWorkingDirectory",
-                     "()Lorg/apache/hadoop/fs/Path;");
+    jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
+            "getWorkingDirectory", "()Lorg/apache/hadoop/fs/Path;");
     if (jthr) {
         ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
             "hdfsGetWorkingDirectory: FileSystem#getWorkingDirectory");
@@ -2071,9 +2830,8 @@ char* hdfsGetWorkingDirectory(hdfsFS fs, char* buffer, size_t bufferSize)
     }
 
     //Path#toString()
-    jthr = invokeMethod(env, &jVal, INSTANCE, jPath, 
-                     "org/apache/hadoop/fs/Path", "toString",
-                     "()Ljava/lang/String;");
+    jthr = invokeMethod(env, &jVal, INSTANCE, jPath, JC_PATH, "toString",
+            "()Ljava/lang/String;");
     if (jthr) {
         ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
             "hdfsGetWorkingDirectory: Path#toString");
@@ -2114,7 +2872,7 @@ char* hdfsGetWorkingDirectory(hdfsFS fs, char* buffer, size_t bufferSize)
 int hdfsSetWorkingDirectory(hdfsFS fs, const char *path)
 {
     // JAVA EQUIVALENT:
-    //  fs.setWorkingDirectory(Path(path)); 
+    //  fs.setWorkingDirectory(Path(path));
 
     jobject jFS = (jobject)fs;
     jthrowable jthr;
@@ -2137,9 +2895,9 @@ int hdfsSetWorkingDirectory(hdfsFS fs, const char *path)
     }
 
     //FileSystem#setWorkingDirectory()
-    jthr = invokeMethod(env, NULL, INSTANCE, jFS, HADOOP_FS,
-                     "setWorkingDirectory", 
-                     "(Lorg/apache/hadoop/fs/Path;)V", jPath);
+    jthr = invokeMethod(env, NULL, INSTANCE, jFS, JC_FILE_SYSTEM,
+            "setWorkingDirectory", "(Lorg/apache/hadoop/fs/Path;)V",
+            jPath);
     destroyLocalReference(env, jPath);
     if (jthr) {
         errno = printExceptionAndFree(env, jthr, NOPRINT_EXC_ILLEGAL_ARGUMENT,
@@ -2179,9 +2937,8 @@ int hdfsCreateDirectory(hdfsFS fs, const char *path)
 
     //Create the directory
     jVal.z = 0;
-    jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
-                     "mkdirs", "(Lorg/apache/hadoop/fs/Path;)Z",
-                     jPath);
+    jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
+            "mkdirs", "(Lorg/apache/hadoop/fs/Path;)Z", jPath);
     destroyLocalReference(env, jPath);
     if (jthr) {
         errno = printExceptionAndFree(env, jthr,
@@ -2229,9 +2986,9 @@ int hdfsSetReplication(hdfsFS fs, const char *path, int16_t replication)
     }
 
     //Create the directory
-    jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
-                     "setReplication", "(Lorg/apache/hadoop/fs/Path;S)Z",
-                     jPath, replication);
+    jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
+            "setReplication", "(Lorg/apache/hadoop/fs/Path;S)Z",
+            jPath, replication);
     destroyLocalReference(env, jPath);
     if (jthr) {
         errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -2278,7 +3035,7 @@ int hdfsChown(hdfsFS fs, const char *path, const char *owner, const char *group)
         goto done;
     }
 
-    jthr = newJavaStr(env, owner, &jOwner); 
+    jthr = newJavaStr(env, owner, &jOwner);
     if (jthr) {
         ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
             "hdfsChown(path=%s): newJavaStr(%s)", path, owner);
@@ -2292,8 +3049,8 @@ int hdfsChown(hdfsFS fs, const char *path, const char *owner, const char *group)
     }
 
     //Create the directory
-    jthr = invokeMethod(env, NULL, INSTANCE, jFS, HADOOP_FS,
-            "setOwner", JMETHOD3(JPARAM(HADOOP_PATH), 
+    jthr = invokeMethod(env, NULL, INSTANCE, jFS, JC_FILE_SYSTEM,
+            "setOwner", JMETHOD3(JPARAM(HADOOP_PATH),
                     JPARAM(JAVA_STRING), JPARAM(JAVA_STRING), JAVA_VOID),
             jPath, jOwner, jGroup);
     if (jthr) {
@@ -2337,12 +3094,12 @@ int hdfsChmod(hdfsFS fs, const char *path, short mode)
     }
 
     // construct jPerm = FsPermission.createImmutable(short mode);
-    jthr = constructNewObjectOfClass(env, &jPermObj,
-                HADOOP_FSPERM,"(S)V",jmode);
+    jthr = constructNewObjectOfCachedClass(env, &jPermObj, JC_FS_PERMISSION,
+            "(S)V", jmode);
     if (jthr) {
         ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-            "constructNewObjectOfClass(%s)", HADOOP_FSPERM);
-        return -1;
+            "constructNewObjectOfCachedClass(%s)", HADOOP_FSPERM);
+        goto done;
     }
 
     //Create an object of org.apache.hadoop.fs.Path
@@ -2354,10 +3111,9 @@ int hdfsChmod(hdfsFS fs, const char *path, short mode)
     }
 
     //Create the directory
-    jthr = invokeMethod(env, NULL, INSTANCE, jFS, HADOOP_FS,
-            "setPermission",
-            JMETHOD2(JPARAM(HADOOP_PATH), JPARAM(HADOOP_FSPERM), JAVA_VOID),
-            jPath, jPermObj);
+    jthr = invokeMethod(env, NULL, INSTANCE, jFS, JC_FILE_SYSTEM,
+            "setPermission", JMETHOD2(JPARAM(HADOOP_PATH),
+                    JPARAM(HADOOP_FSPERM), JAVA_VOID), jPath, jPermObj);
     if (jthr) {
         ret = printExceptionAndFree(env, jthr,
             NOPRINT_EXC_ACCESS_CONTROL | NOPRINT_EXC_FILE_NOT_FOUND |
@@ -2407,9 +3163,9 @@ int hdfsUtime(hdfsFS fs, const char *path, tTime mtime, tTime atime)
     jmtime = (mtime == NO_CHANGE) ? -1 : (mtime * (jlong)1000);
     jatime = (atime == NO_CHANGE) ? -1 : (atime * (jlong)1000);
 
-    jthr = invokeMethod(env, NULL, INSTANCE, jFS, HADOOP_FS,
-            "setTimes", JMETHOD3(JPARAM(HADOOP_PATH), "J", "J", JAVA_VOID),
-            jPath, jmtime, jatime);
+    jthr = invokeMethod(env, NULL, INSTANCE, jFS, JC_FILE_SYSTEM,
+            "setTimes", JMETHOD3(JPARAM(HADOOP_PATH), "J", "J",
+                    JAVA_VOID), jPath, jmtime, jatime);
     destroyLocalReference(env, jPath);
     if (jthr) {
         errno = printExceptionAndFree(env, jthr,
@@ -2485,6 +3241,8 @@ int hadoopRzOptionsSetByteBufferPool(
     JNIEnv *env;
     jthrowable jthr;
     jobject byteBufferPool = NULL;
+    jobject globalByteBufferPool = NULL;
+    int ret;
 
     env = getJNIEnv();
     if (!env) {
@@ -2501,15 +3259,37 @@ int hadoopRzOptionsSetByteBufferPool(
       if (jthr) {
           printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
               "hadoopRzOptionsSetByteBufferPool(className=%s): ", className);
-          errno = EINVAL;
-          return -1;
+          ret = EINVAL;
+          goto done;
       }
-    }
-    if (opts->byteBufferPool) {
-        // Delete any previous ByteBufferPool we had.
+      // Only set opts->byteBufferPool if creating a global reference is
+      // successful
+      globalByteBufferPool = (*env)->NewGlobalRef(env, byteBufferPool);
+      if (!globalByteBufferPool) {
+          printPendingExceptionAndFree(env, PRINT_EXC_ALL,
+                  "hadoopRzOptionsSetByteBufferPool(className=%s): ",
+                  className);
+          ret = EINVAL;
+          goto done;
+      }
+      // Delete any previous ByteBufferPool we had before setting a new one.
+      if (opts->byteBufferPool) {
+          (*env)->DeleteGlobalRef(env, opts->byteBufferPool);
+      }
+      opts->byteBufferPool = globalByteBufferPool;
+    } else if (opts->byteBufferPool) {
+        // If the specified className is NULL, delete any previous
+        // ByteBufferPool we had.
         (*env)->DeleteGlobalRef(env, opts->byteBufferPool);
+        opts->byteBufferPool = NULL;
+    }
+    ret = 0;
+done:
+    destroyLocalReference(env, byteBufferPool);
+    if (ret) {
+        errno = ret;
+        return -1;
     }
-    opts->byteBufferPool = byteBufferPool;
     return 0;
 }
 
@@ -2549,28 +3329,28 @@ static jthrowable hadoopRzOptionsGetEnumSet(JNIEnv *env,
         goto done;
     }
     if (opts->skipChecksums) {
-        jthr = fetchEnumInstance(env, READ_OPTION,
+        jthr = fetchEnumInstance(env, HADOOP_RO,
                   "SKIP_CHECKSUMS", &enumInst);
         if (jthr) {
             goto done;
         }
-        jthr = invokeMethod(env, &jVal, STATIC, NULL,
-                "java/util/EnumSet", "of",
-                "(Ljava/lang/Enum;)Ljava/util/EnumSet;", enumInst);
+        jthr = invokeMethod(env, &jVal, STATIC, NULL, JC_ENUM_SET,
+                "of", "(Ljava/lang/Enum;)Ljava/util/EnumSet;", enumInst);
         if (jthr) {
             goto done;
         }
         enumSetObj = jVal.l;
     } else {
-        jclass clazz = (*env)->FindClass(env, READ_OPTION);
+        jclass clazz = (*env)->FindClass(env, HADOOP_RO);
         if (!clazz) {
-            jthr = newRuntimeError(env, "failed "
-                    "to find class for %s", READ_OPTION);
+            jthr = getPendingExceptionAndClear(env);
+            goto done;
+        }
+        jthr = invokeMethod(env, &jVal, STATIC, NULL, JC_ENUM_SET,
+                "noneOf", "(Ljava/lang/Class;)Ljava/util/EnumSet;", clazz);
+        if (jthr) {
             goto done;
         }
-        jthr = invokeMethod(env, &jVal, STATIC, NULL,
-                "java/util/EnumSet", "noneOf",
-                "(Ljava/lang/Class;)Ljava/util/EnumSet;", clazz);
         enumSetObj = jVal.l;
     }
     // create global ref
@@ -2599,7 +3379,7 @@ static int hadoopReadZeroExtractBuffer(JNIEnv *env,
     jarray array = NULL;
 
     jthr = invokeMethod(env, &jVal, INSTANCE, buffer->byteBuffer,
-                     "java/nio/ByteBuffer", "remaining", "()I");
+            JC_BYTE_BUFFER, "remaining", "()I");
     if (jthr) {
         ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
                 "hadoopReadZeroExtractBuffer: ByteBuffer#remaining failed: ");
@@ -2607,7 +3387,7 @@ static int hadoopReadZeroExtractBuffer(JNIEnv *env,
     }
     buffer->length = jVal.i;
     jthr = invokeMethod(env, &jVal, INSTANCE, buffer->byteBuffer,
-                     "java/nio/ByteBuffer", "position", "()I");
+            JC_BYTE_BUFFER, "position", "()I");
     if (jthr) {
         ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
                 "hadoopReadZeroExtractBuffer: ByteBuffer#position failed: ");
@@ -2638,7 +3418,7 @@ static int hadoopReadZeroExtractBuffer(JNIEnv *env,
     }
     // Get the backing array object of this buffer.
     jthr = invokeMethod(env, &jVal, INSTANCE, buffer->byteBuffer,
-                     "java/nio/ByteBuffer", "array", "()[B");
+            JC_BYTE_BUFFER, "array", "()[B");
     if (jthr) {
         ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
                 "hadoopReadZeroExtractBuffer: ByteBuffer#array failed: ");
@@ -2691,6 +3471,7 @@ static int translateZCRException(JNIEnv *env, jthrowable exc)
     }
     if (!strcmp(className, "java.lang.UnsupportedOperationException")) {
         ret = EPROTONOSUPPORT;
+        destroyLocalReference(env, exc);
         goto done;
     }
     ret = printExceptionAndFree(env, exc, PRINT_EXC_ALL,
@@ -2731,9 +3512,10 @@ struct hadoopRzBuffer* hadoopReadZero(hdfsFile file,
                 "hadoopReadZero: hadoopRzOptionsGetEnumSet failed: ");
         goto done;
     }
-    jthr = invokeMethod(env, &jVal, INSTANCE, file->file, HADOOP_ISTRM, "read",
-        "(Lorg/apache/hadoop/io/ByteBufferPool;ILjava/util/EnumSet;)"
-        "Ljava/nio/ByteBuffer;", opts->byteBufferPool, maxLength, enumSet);
+    jthr = invokeMethod(env, &jVal, INSTANCE, file->file,
+            JC_FS_DATA_INPUT_STREAM, "read",
+            "(Lorg/apache/hadoop/io/ByteBufferPool;ILjava/util/EnumSet;)"
+            "Ljava/nio/ByteBuffer;", opts->byteBufferPool, maxLength, enumSet);
     if (jthr) {
         ret = translateZCRException(env, jthr);
         goto done;
@@ -2788,7 +3570,7 @@ void hadoopRzBufferFree(hdfsFile file, struct hadoopRzBuffer *buffer)
     jvalue jVal;
     jthrowable jthr;
     JNIEnv* env;
-    
+
     env = getJNIEnv();
     if (env == NULL) {
         errno = EINTERNAL;
@@ -2796,8 +3578,8 @@ void hadoopRzBufferFree(hdfsFile file, struct hadoopRzBuffer *buffer)
     }
     if (buffer->byteBuffer) {
         jthr = invokeMethod(env, &jVal, INSTANCE, file->file,
-                    HADOOP_ISTRM, "releaseBuffer",
-                    "(Ljava/nio/ByteBuffer;)V", buffer->byteBuffer);
+                JC_FS_DATA_INPUT_STREAM, "releaseBuffer",
+                "(Ljava/nio/ByteBuffer;)V", buffer->byteBuffer);
         if (jthr) {
             printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
                     "hadoopRzBufferFree: releaseBuffer failed: ");
@@ -2846,8 +3628,8 @@ hdfsGetHosts(hdfsFS fs, const char *path, tOffset start, tOffset length)
             "hdfsGetHosts(path=%s): constructNewObjectOfPath", path);
         goto done;
     }
-    jthr = invokeMethod(env, &jFSVal, INSTANCE, jFS,
-            HADOOP_FS, "getFileStatus", "(Lorg/apache/hadoop/fs/Path;)"
+    jthr = invokeMethod(env, &jFSVal, INSTANCE, jFS, JC_FILE_SYSTEM,
+            "getFileStatus", "(Lorg/apache/hadoop/fs/Path;)"
             "Lorg/apache/hadoop/fs/FileStatus;", jPath);
     if (jthr) {
         ret = printExceptionAndFree(env, jthr, NOPRINT_EXC_FILE_NOT_FOUND,
@@ -2859,11 +3641,11 @@ hdfsGetHosts(hdfsFS fs, const char *path, tOffset start, tOffset length)
     jFileStatus = jFSVal.l;
 
     //org.apache.hadoop.fs.FileSystem#getFileBlockLocations
-    jthr = invokeMethod(env, &jVal, INSTANCE, jFS,
-                     HADOOP_FS, "getFileBlockLocations", 
-                     "(Lorg/apache/hadoop/fs/FileStatus;JJ)"
-                     "[Lorg/apache/hadoop/fs/BlockLocation;",
-                     jFileStatus, start, length);
+    jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
+            "getFileBlockLocations",
+            "(Lorg/apache/hadoop/fs/FileStatus;JJ)"
+            "[Lorg/apache/hadoop/fs/BlockLocation;", jFileStatus, start,
+            length);
     if (jthr) {
         ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
                 "hdfsGetHosts(path=%s, start=%"PRId64", length=%"PRId64"):"
@@ -2890,15 +3672,17 @@ hdfsGetHosts(hdfsFS fs, const char *path, tOffset start, tOffset length)
     for (i = 0; i < jNumFileBlocks; ++i) {
         jFileBlock =
             (*env)->GetObjectArrayElement(env, jBlockLocations, i);
-        if (!jFileBlock) {
-            ret = printPendingExceptionAndFree(env, PRINT_EXC_ALL,
+        jthr = (*env)->ExceptionOccurred(env);
+        if (jthr || !jFileBlock) {
+            ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
                 "hdfsGetHosts(path=%s, start=%"PRId64", length=%"PRId64"):"
                 "GetObjectArrayElement(%d)", path, start, length, i);
             goto done;
         }
-        
-        jthr = invokeMethod(env, &jVal, INSTANCE, jFileBlock, HADOOP_BLK_LOC,
-                         "getHosts", "()[Ljava/lang/String;");
+
+        jthr = invokeMethod(env, &jVal, INSTANCE, jFileBlock,
+                JC_BLOCK_LOCATION, "getHosts",
+                "()[Ljava/lang/String;");
         if (jthr) {
             ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
                 "hdfsGetHosts(path=%s, start=%"PRId64", length=%"PRId64"):"
@@ -2924,8 +3708,9 @@ hdfsGetHosts(hdfsFS fs, const char *path, tOffset start, tOffset length)
         //Now parse each hostname
         for (j = 0; j < jNumBlockHosts; ++j) {
             jHost = (*env)->GetObjectArrayElement(env, jFileBlockHosts, j);
-            if (!jHost) {
-                ret = printPendingExceptionAndFree(env, PRINT_EXC_ALL,
+            jthr = (*env)->ExceptionOccurred(env);
+            if (jthr || !jHost) {
+                ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
                     "hdfsGetHosts(path=%s, start=%"PRId64", length=%"PRId64"): "
                     "NewByteArray", path, start, length);
                 goto done;
@@ -3002,8 +3787,8 @@ tOffset hdfsGetDefaultBlockSize(hdfsFS fs)
     }
 
     //FileSystem#getDefaultBlockSize()
-    jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
-                     "getDefaultBlockSize", "()J");
+    jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
+            "getDefaultBlockSize", "()J");
     if (jthr) {
         errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
             "hdfsGetDefaultBlockSize: FileSystem#getDefaultBlockSize");
@@ -3066,16 +3851,16 @@ tOffset hdfsGetCapacity(hdfsFS fs)
     }
 
     //FileSystem#getStatus
-    jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
-                     "getStatus", "()Lorg/apache/hadoop/fs/FsStatus;");
+    jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
+            "getStatus", "()Lorg/apache/hadoop/fs/FsStatus;");
     if (jthr) {
         errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
             "hdfsGetCapacity: FileSystem#getStatus");
         return -1;
     }
     fss = (jobject)jVal.l;
-    jthr = invokeMethod(env, &jVal, INSTANCE, fss, HADOOP_FSSTATUS,
-                     "getCapacity", "()J");
+    jthr = invokeMethod(env, &jVal, INSTANCE, fss,
+            JC_FS_STATUS, "getCapacity", "()J");
     destroyLocalReference(env, fss);
     if (jthr) {
         errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -3086,7 +3871,7 @@ tOffset hdfsGetCapacity(hdfsFS fs)
 }
 
 
-  
+
 tOffset hdfsGetUsed(hdfsFS fs)
 {
     // JAVA EQUIVALENT:
@@ -3106,16 +3891,16 @@ tOffset hdfsGetUsed(hdfsFS fs)
     }
 
     //FileSystem#getStatus
-    jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
-                     "getStatus", "()Lorg/apache/hadoop/fs/FsStatus;");
+    jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
+            "getStatus", "()Lorg/apache/hadoop/fs/FsStatus;");
     if (jthr) {
         errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
             "hdfsGetUsed: FileSystem#getStatus");
         return -1;
     }
     fss = (jobject)jVal.l;
-    jthr = invokeMethod(env, &jVal, INSTANCE, fss, HADOOP_FSSTATUS,
-                     "getUsed", "()J");
+    jthr = invokeMethod(env, &jVal, INSTANCE, fss, JC_FS_STATUS,
+            "getUsed", "()J");
     destroyLocalReference(env, fss);
     if (jthr) {
         errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -3124,7 +3909,7 @@ tOffset hdfsGetUsed(hdfsFS fs)
     }
     return jVal.j;
 }
- 
+
 /**
  * We cannot add new fields to the hdfsFileInfo structure because it would break
  * binary compatibility.  The reason is because we return an array
@@ -3173,46 +3958,46 @@ getFileInfoFromStat(JNIEnv *env, jobject jStat, hdfsFileInfo *fileInfo)
     struct hdfsExtendedFileInfo *extInfo;
     size_t extOffset;
 
-    jthr = invokeMethod(env, &jVal, INSTANCE, jStat,
-                     HADOOP_STAT, "isDir", "()Z");
+    jthr = invokeMethod(env, &jVal, INSTANCE, jStat, JC_FILE_STATUS, "isDir",
+            "()Z");
     if (jthr)
         goto done;
     fileInfo->mKind = jVal.z ? kObjectKindDirectory : kObjectKindFile;
 
-    jthr = invokeMethod(env, &jVal, INSTANCE, jStat,
-                     HADOOP_STAT, "getReplication", "()S");
+    jthr = invokeMethod(env, &jVal, INSTANCE, jStat, JC_FILE_STATUS,
+            "getReplication", "()S");
     if (jthr)
         goto done;
     fileInfo->mReplication = jVal.s;
 
-    jthr = invokeMethod(env, &jVal, INSTANCE, jStat,
-                     HADOOP_STAT, "getBlockSize", "()J");
+    jthr = invokeMethod(env, &jVal, INSTANCE, jStat, JC_FILE_STATUS,
+            "getBlockSize", "()J");
     if (jthr)
         goto done;
     fileInfo->mBlockSize = jVal.j;
 
-    jthr = invokeMethod(env, &jVal, INSTANCE, jStat,
-                     HADOOP_STAT, "getModificationTime", "()J");
+    jthr = invokeMethod(env, &jVal, INSTANCE, jStat, JC_FILE_STATUS,
+            "getModificationTime", "()J");
     if (jthr)
         goto done;
     fileInfo->mLastMod = jVal.j / 1000;
 
-    jthr = invokeMethod(env, &jVal, INSTANCE, jStat,
-                     HADOOP_STAT, "getAccessTime", "()J");
+    jthr = invokeMethod(env, &jVal, INSTANCE, jStat, JC_FILE_STATUS,
+            "getAccessTime", "()J");
     if (jthr)
         goto done;
     fileInfo->mLastAccess = (tTime) (jVal.j / 1000);
 
     if (fileInfo->mKind == kObjectKindFile) {
-        jthr = invokeMethod(env, &jVal, INSTANCE, jStat,
-                         HADOOP_STAT, "getLen", "()J");
+        jthr = invokeMethod(env, &jVal, INSTANCE, jStat, JC_FILE_STATUS,
+                "getLen", "()J");
         if (jthr)
             goto done;
         fileInfo->mSize = jVal.j;
     }
 
-    jthr = invokeMethod(env, &jVal, INSTANCE, jStat, HADOOP_STAT,
-                     "getPath", "()Lorg/apache/hadoop/fs/Path;");
+    jthr = invokeMethod(env, &jVal, INSTANCE, jStat, JC_FILE_STATUS,
+            "getPath", "()Lorg/apache/hadoop/fs/Path;");
     if (jthr)
         goto done;
     jPath = jVal.l;
@@ -3222,8 +4007,8 @@ getFileInfoFromStat(JNIEnv *env, jobject jStat, hdfsFileInfo *fileInfo)
         goto done;
     }
 
-    jthr = invokeMethod(env, &jVal, INSTANCE, jPath, HADOOP_PATH,
-                     "toString", "()Ljava/lang/String;");
+    jthr = invokeMethod(env, &jVal, INSTANCE, jPath, JC_PATH, "toString",
+            "()Ljava/lang/String;");
     if (jthr)
         goto done;
     jPathName = jVal.l;
@@ -3235,8 +4020,8 @@ getFileInfoFromStat(JNIEnv *env, jobject jStat, hdfsFileInfo *fileInfo)
     }
     fileInfo->mName = strdup(cPathName);
     (*env)->ReleaseStringUTFChars(env, jPathName, cPathName);
-    jthr = invokeMethod(env, &jVal, INSTANCE, jStat, HADOOP_STAT,
-                    "getOwner", "()Ljava/lang/String;");
+    jthr = invokeMethod(env, &jVal, INSTANCE, jStat, JC_FILE_STATUS, "getOwner",
+            "()Ljava/lang/String;");
     if (jthr)
         goto done;
     jUserName = jVal.l;
@@ -3256,16 +4041,16 @@ getFileInfoFromStat(JNIEnv *env, jobject jStat, hdfsFileInfo *fileInfo)
     (*env)->ReleaseStringUTFChars(env, jUserName, cUserName);
     extInfo = getExtendedFileInfo(fileInfo);
     memset(extInfo, 0, sizeof(*extInfo));
-    jthr = invokeMethod(env, &jVal, INSTANCE, jStat,
-                    HADOOP_STAT, "isEncrypted", "()Z");
+    jthr = invokeMethod(env, &jVal, INSTANCE, jStat, JC_FILE_STATUS,
+            "isEncrypted", "()Z");
     if (jthr) {
         goto done;
     }
     if (jVal.z == JNI_TRUE) {
         extInfo->flags |= HDFS_EXTENDED_FILE_INFO_ENCRYPTED;
     }
-    jthr = invokeMethod(env, &jVal, INSTANCE, jStat, HADOOP_STAT,
-                    "getGroup", "()Ljava/lang/String;");
+    jthr = invokeMethod(env, &jVal, INSTANCE, jStat, JC_FILE_STATUS,
+            "getGroup", "()Ljava/lang/String;");
     if (jthr)
         goto done;
     jGroupName = jVal.l;
@@ -3277,19 +4062,19 @@ getFileInfoFromStat(JNIEnv *env, jobject jStat, hdfsFileInfo *fileInfo)
     fileInfo->mGroup = strdup(cGroupName);
     (*env)->ReleaseStringUTFChars(env, jGroupName, cGroupName);
 
-    jthr = invokeMethod(env, &jVal, INSTANCE, jStat, HADOOP_STAT,
+    jthr = invokeMethod(env, &jVal, INSTANCE, jStat, JC_FILE_STATUS,
             "getPermission",
             "()Lorg/apache/hadoop/fs/permission/FsPermission;");
     if (jthr)
         goto done;
     if (jVal.l == NULL) {
         jthr = newRuntimeError(env, "%s#getPermission returned NULL!",
-            HADOOP_STAT);
+            HADOOP_FILESTAT);
         goto done;
     }
     jPermission = jVal.l;
-    jthr = invokeMethod(env, &jVal, INSTANCE, jPermission, HADOOP_FSPERM,
-                         "toShort", "()S");
+    jthr = invokeMethod(env, &jVal, INSTANCE, jPermission,
+            JC_FS_PERMISSION, "toShort", "()S");
     if (jthr)
         goto done;
     fileInfo->mPermissions = jVal.s;
@@ -3303,7 +4088,6 @@ getFileInfoFromStat(JNIEnv *env, jobject jStat, hdfsFileInfo *fileInfo)
     destroyLocalReference(env, jUserName);
     destroyLocalReference(env, jGroupName);
     destroyLocalReference(env, jPermission);
-    destroyLocalReference(env, jPath);
     return jthr;
 }
 
@@ -3323,18 +4107,17 @@ getFileInfo(JNIEnv *env, jobject jFS, jobject jPath, hdfsFileInfo **fileInfo)
     jvalue  jVal;
     jthrowable jthr;
 
-    jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
-                     "exists", JMETHOD1(JPARAM(HADOOP_PATH), "Z"),
-                     jPath);
+    jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM, "exists",
+            JMETHOD1(JPARAM(HADOOP_PATH), "Z"), jPath);
     if (jthr)
         return jthr;
     if (jVal.z == 0) {
         *fileInfo = NULL;
         return NULL;
     }
-    jthr = invokeMethod(env, &jVal, INSTANCE, jFS,
-            HADOOP_FS, "getFileStatus",
-            JMETHOD1(JPARAM(HADOOP_PATH), JPARAM(HADOOP_STAT)), jPath);
+    jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
+            "getFileStatus", JMETHOD1(JPARAM(HADOOP_PATH),
+            JPARAM(HADOOP_FILESTAT)), jPath);
     if (jthr)
         return jthr;
     jStat = jVal.l;
@@ -3343,7 +4126,7 @@ getFileInfo(JNIEnv *env, jobject jFS, jobject jPath, hdfsFileInfo **fileInfo)
         destroyLocalReference(env, jStat);
         return newRuntimeError(env, "getFileInfo: OOM allocating hdfsFileInfo");
     }
-    jthr = getFileInfoFromStat(env, jStat, *fileInfo); 
+    jthr = getFileInfoFromStat(env, jStat, *fileInfo);
     destroyLocalReference(env, jStat);
     return jthr;
 }
@@ -3355,13 +4138,13 @@ hdfsFileInfo* hdfsListDirectory(hdfsFS fs, const char *path, int *numEntries)
     // JAVA EQUIVALENT:
     //  Path p(path);
     //  Path []pathList = fs.listPaths(p)
-    //  foreach path in pathList 
+    //  foreach path in pathList
     //    getFileInfo(path)
 
     jobject jFS = (jobject)fs;
     jthrowable jthr;
     jobject jPath = NULL;
-    hdfsFileInfo *pathList = NULL; 
+    hdfsFileInfo *pathList = NULL;
     jobjectArray jPathList = NULL;
     jvalue jVal;
     jsize jPathListSize = 0;
@@ -3384,9 +4167,9 @@ hdfsFileInfo* hdfsListDirectory(hdfsFS fs, const char *path, int *numEntries)
         goto done;
     }
 
-    jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_DFS, "listStatus",
-                     JMETHOD1(JPARAM(HADOOP_PATH), JARRPARAM(HADOOP_STAT)),
-                     jPath);
+    jthr = invokeMethod(env, &jVal, INSTANCE, jFS,
+            JC_DISTRIBUTED_FILE_SYSTEM, "listStatus",
+            JMETHOD1(JPARAM(HADOOP_PATH), JARRPARAM(HADOOP_FILESTAT)), jPath);
     if (jthr) {
         ret = printExceptionAndFree(env, jthr,
             NOPRINT_EXC_ACCESS_CONTROL | NOPRINT_EXC_FILE_NOT_FOUND |
@@ -3413,8 +4196,9 @@ hdfsFileInfo* hdfsListDirectory(hdfsFS fs, const char *path, int *numEntries)
     //Save path information in pathList
     for (i=0; i < jPathListSize; ++i) {
         tmpStat = (*env)->GetObjectArrayElement(env, jPathList, i);
-        if (!tmpStat) {
-            ret = printPendingExceptionAndFree(env, PRINT_EXC_ALL,
+        jthr = (*env)->ExceptionOccurred(env);
+        if (jthr || !tmpStat) {
+            ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
                 "hdfsListDirectory(%s): GetObjectArrayElement(%d out of %d)",
                 path, i, jPathListSize);
             goto done;
@@ -3530,4 +4314,4 @@ char* hdfsGetLastExceptionStackTrace()
 
 /**
  * vim: ts=4: sw=4: et:
- */
+ */
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/include/hdfs/hdfs.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/include/hdfs/hdfs.h
index 7e45634d4e02b..eba50ff6eb277 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/include/hdfs/hdfs.h
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/include/hdfs/hdfs.h
@@ -82,6 +82,29 @@ extern  "C" {
     } tObjectKind;
     struct hdfsStreamBuilder;
 
+    /**
+     * The C reflection of the enum values from java.util.concurrent.TimeUnit.
+     */
+    typedef enum javaConcurrentTimeUnit {
+        jNanoseconds,
+        jMicroseconds,
+        jMilliseconds,
+        jSeconds,
+        jMinutes,
+        jHours,
+        jDays,
+    } javaConcurrentTimeUnit;
+
+    /**
+     * The C reflection of java.util.concurrent.Future specifically used for
+     * opening HDFS files asynchronously.
+     */
+    typedef struct hdfsOpenFileFuture hdfsOpenFileFuture;
+
+    /**
+     * The C reflection of o.a.h.fs.FutureDataInputStreamBuilder.
+     */
+    typedef struct hdfsOpenFileBuilder hdfsOpenFileBuilder;
 
     /**
      * The C reflection of org.apache.org.hadoop.FileSystem .
@@ -429,6 +452,118 @@ extern  "C" {
     hdfsFile hdfsOpenFile(hdfsFS fs, const char* path, int flags,
                           int bufferSize, short replication, tSize blocksize);
 
+    /**
+     * hdfsOpenFileBuilderAlloc - Allocate an HDFS open file builder.
+     *
+     * @param fs The configured filesystem handle.
+     * @param path The full path to the file.
+     * @return Returns the hdfsOpenFileBuilder, or NULL on error.
+     */
+    LIBHDFS_EXTERNAL
+    hdfsOpenFileBuilder *hdfsOpenFileBuilderAlloc(hdfsFS fs,
+            const char *path);
+
+    /**
+     * hdfsOpenFileBuilderMust - Specifies a mandatory parameter for the open
+     * file builder. While the underlying FsBuilder supports various types
+     * for the value (boolean, int, float, double), currently only strings
+     * are supported.
+     *
+     * @param builder The open file builder to set the config for.
+     * @param key The config key
+     * @param value The config value
+     * @return Returns the hdfsOpenFileBuilder, or NULL on error.
+     */
+    LIBHDFS_EXTERNAL
+    hdfsOpenFileBuilder *hdfsOpenFileBuilderMust(hdfsOpenFileBuilder *builder,
+            const char *key, const char *value);
+
+    /**
+     * hdfsOpenFileBuilderOpt - Specifies an optional parameter for the open
+     * file builder. While the underlying FsBuilder supports various types
+     * for the value (boolean, int, float, double), currently only strings
+     * are supported.
+     *
+     * @param builder The open file builder to set the config for.
+     * @param key The config key
+     * @param value The config value
+     * @return Returns the hdfsOpenFileBuilder, or NULL on error.
+     */
+    LIBHDFS_EXTERNAL
+    hdfsOpenFileBuilder *hdfsOpenFileBuilderOpt(hdfsOpenFileBuilder *builder,
+            const char *key, const char *value);
+
+    /**
+     * hdfsOpenFileBuilderBuild - Builds the open file builder and returns a
+     * hdfsOpenFileFuture which tracks the asynchronous call to open the
+     * specified file.
+     *
+     * @param builder The open file builder to build.
+     * @return Returns the hdfsOpenFileFuture, or NULL on error.
+     */
+    LIBHDFS_EXTERNAL
+    hdfsOpenFileFuture *hdfsOpenFileBuilderBuild(hdfsOpenFileBuilder *builder);
+
+    /**
+     * hdfsOpenFileBuilderFree - Free an HDFS open file builder.
+     *
+     * It is normally not necessary to call this function since
+     * hdfsOpenFileBuilderBuild frees the builder.
+     *
+     * @param builder The hdfsOpenFileBuilder to free.
+     */
+    LIBHDFS_EXTERNAL
+    void hdfsOpenFileBuilderFree(hdfsOpenFileBuilder *builder);
+
+    /**
+     * hdfsOpenFileFutureGet - Call Future#get() on the underlying Java Future
+     * object. A call to #get() will block until the asynchronous operation
+     * has completed, in this case until the open file call has completed.
+     * This method blocks indefinitely until the call completes.
+     *
+     * @param future The hdfsOpenFileFuture to call #get on
+     * @return Returns the opened hdfsFile, or NULL on error.
+     */
+    LIBHDFS_EXTERNAL
+    hdfsFile hdfsOpenFileFutureGet(hdfsOpenFileFuture *future);
+
+    /**
+     * hdfsOpenFileFutureGetWithTimeout - Call Future#get(long, TimeUnit) on
+     * the underlying Java Future object. A call to #get(long, TimeUnit) will
+     * block until the asynchronous operation has completed (in this case,
+     * until the open file call has completed) or the specified timeout has
+     * been reached.
+     *
+     * @param future The hdfsOpenFileFuture to call #get on
+     * @param timeout The maximum amount of time to wait
+     * @param timeUnit The time unit of the timeout argument
+     * @return Returns the opened hdfsFile, or NULL on error or if the timeout
+     *         has been reached.
+     */
+    LIBHDFS_EXTERNAL
+    hdfsFile hdfsOpenFileFutureGetWithTimeout(hdfsOpenFileFuture *future,
+            int64_t timeout, javaConcurrentTimeUnit timeUnit);
+
+    /**
+     * hdfsOpenFileFutureCancel - Call Future#cancel(boolean) on the
+     * underlying Java Future object. The value of mayInterruptIfRunning
+     * controls whether the Java thread running the Future should be
+     * interrupted or not.
+     *
+     * @param future The hdfsOpenFileFuture to call #cancel on
+     * @param mayInterruptIfRunning if true, interrupts the running thread
+     * @return Returns 0 if the thread was successfully cancelled, else -1
+     */
+    LIBHDFS_EXTERNAL
+    int hdfsOpenFileFutureCancel(hdfsOpenFileFuture *future,
+            int mayInterruptIfRunning);
+
+    /**
+     * hdfsOpenFileFutureFree - Free an HDFS open file future.
+     *
+     * @param future The hdfsOpenFileFuture to free.
+     */
+    LIBHDFS_EXTERNAL
+    void hdfsOpenFileFutureFree(hdfsOpenFileFuture *future);
+
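+    /**
+     * Illustrative usage sketch of the builder/future API declared above
+     * (not part of the API contract): open a file asynchronously and wait
+     * at most 10 seconds for the result. The config key and value passed to
+     * hdfsOpenFileBuilderOpt are hypothetical placeholders.
+     *
+     *   hdfsOpenFileBuilder *builder = hdfsOpenFileBuilderAlloc(fs, path);
+     *   hdfsOpenFileBuilderOpt(builder, "example.key", "example.value");
+     *   hdfsOpenFileFuture *future = hdfsOpenFileBuilderBuild(builder);
+     *   hdfsFile file = hdfsOpenFileFutureGetWithTimeout(future, 10,
+     *           jSeconds);
+     *   hdfsOpenFileFutureFree(future);
+     *   if (!file) { ... }
+     */
+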
     /**
      * hdfsStreamBuilderAlloc - Allocate an HDFS stream builder.
      *
@@ -600,7 +735,8 @@ extern  "C" {
     tSize hdfsRead(hdfsFS fs, hdfsFile file, void* buffer, tSize length);
 
     /** 
-     * hdfsPread - Positional read of data from an open file.
+     * hdfsPread - Positional read of data from an open file. Reads up to
+     * length bytes into the given buffer.
      * @param fs The configured filesystem handle.
      * @param file The file handle.
      * @param position Position from which to read
@@ -612,6 +748,24 @@ extern  "C" {
     tSize hdfsPread(hdfsFS fs, hdfsFile file, tOffset position,
                     void* buffer, tSize length);
 
+    /**
+     * hdfsPreadFully - Positional read of data from an open file. Reads
+     * exactly length bytes, or fails if the end of the data is reached
+     * first. Unlike hdfsRead and hdfsPread, this method does not return the
+     * number of bytes read: either the entire length of the buffer is
+     * filled, or an error is returned. If EOF is reached before length
+     * bytes have been read, an exception is thrown and errno is set to
+     * EINTR.
+     * @param fs The configured filesystem handle.
+     * @param file The file handle.
+     * @param position Position from which to read
+     * @param buffer The buffer to copy read bytes into.
+     * @param length The length of the buffer.
+     * @return Returns 0 on success, -1 on error.
+     */
+    LIBHDFS_EXTERNAL
+    int hdfsPreadFully(hdfsFS fs, hdfsFile file, tOffset position,
+                    void* buffer, tSize length);
+
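+    /**
+     * Illustrative usage sketch (not part of the API contract), assuming a
+     * record of a known size at a known offset:
+     *
+     *   char buf[4096];
+     *   if (hdfsPreadFully(fs, file, offset, buf, sizeof(buf)) == 0) {
+     *       // buf now holds exactly sizeof(buf) bytes
+     *   }
+     */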
 
     /** 
      * hdfsWrite - Write data into an open file.
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jclasses.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jclasses.c
new file mode 100644
index 0000000000000..9f589ac257aa1
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jclasses.c
@@ -0,0 +1,140 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "exception.h"
+#include "jclasses.h"
+#include "jni_helper.h"
+#include "os/mutexes.h"
+
+#include <assert.h>
+
+/**
+ * Whether initCachedClasses has been called or not. Protected by the mutex
+ * jclassInitMutex.
+ */
+static int jclassesInitialized = 0;
+
+typedef struct {
+    jclass javaClass;
+    const char *className;
+} javaClassAndName;
+
+/**
+ * A collection of commonly used jclass objects that are used throughout
+ * libhdfs. The jclasses are loaded immediately after the JVM is created (see
+ * initCachedClasses). The array is indexed using CachedJavaClass.
+ */
+javaClassAndName cachedJavaClasses[NUM_CACHED_CLASSES];
+
+/**
+ * Helper method that creates and sets a jclass object given a class name.
+ * Returns a jthrowable on error, NULL otherwise.
+ */
+static jthrowable initCachedClass(JNIEnv *env, const char *className,
+        jclass *cachedJclass) {
+    assert(className != NULL && "Found a CachedJavaClass without a class "
+                                "name");
+    jthrowable jthr = NULL;
+    jclass tempLocalClassRef;
+    tempLocalClassRef = (*env)->FindClass(env, className);
+    if (!tempLocalClassRef) {
+        jthr = getPendingExceptionAndClear(env);
+        goto done;
+    }
+    *cachedJclass = (jclass) (*env)->NewGlobalRef(env, tempLocalClassRef);
+    if (!*cachedJclass) {
+        jthr = getPendingExceptionAndClear(env);
+        goto done;
+    }
+done:
+    destroyLocalReference(env, tempLocalClassRef);
+    return jthr;
+}
+
+jthrowable initCachedClasses(JNIEnv* env) {
+    mutexLock(&jclassInitMutex);
+    if (!jclassesInitialized) {
+        // Set all the class names
+        cachedJavaClasses[JC_CONFIGURATION].className =
+                "org/apache/hadoop/conf/Configuration";
+        cachedJavaClasses[JC_PATH].className =
+                "org/apache/hadoop/fs/Path";
+        cachedJavaClasses[JC_FILE_SYSTEM].className =
+                "org/apache/hadoop/fs/FileSystem";
+        cachedJavaClasses[JC_FS_STATUS].className =
+                "org/apache/hadoop/fs/FsStatus";
+        cachedJavaClasses[JC_FILE_UTIL].className =
+                "org/apache/hadoop/fs/FileUtil";
+        cachedJavaClasses[JC_BLOCK_LOCATION].className =
+                "org/apache/hadoop/fs/BlockLocation";
+        cachedJavaClasses[JC_DFS_HEDGED_READ_METRICS].className =
+                "org/apache/hadoop/hdfs/DFSHedgedReadMetrics";
+        cachedJavaClasses[JC_DISTRIBUTED_FILE_SYSTEM].className =
+                "org/apache/hadoop/hdfs/DistributedFileSystem";
+        cachedJavaClasses[JC_FS_DATA_INPUT_STREAM].className =
+                "org/apache/hadoop/fs/FSDataInputStream";
+        cachedJavaClasses[JC_FS_DATA_OUTPUT_STREAM].className =
+                "org/apache/hadoop/fs/FSDataOutputStream";
+        cachedJavaClasses[JC_FILE_STATUS].className =
+                "org/apache/hadoop/fs/FileStatus";
+        cachedJavaClasses[JC_FS_PERMISSION].className =
+                "org/apache/hadoop/fs/permission/FsPermission";
+        cachedJavaClasses[JC_READ_STATISTICS].className =
+                "org/apache/hadoop/hdfs/ReadStatistics";
+        cachedJavaClasses[JC_HDFS_DATA_INPUT_STREAM].className =
+                "org/apache/hadoop/hdfs/client/HdfsDataInputStream";
+        cachedJavaClasses[JC_FUTURE_DATA_IS_BUILDER].className =
+                "org/apache/hadoop/fs/FutureDataInputStreamBuilder";
+        cachedJavaClasses[JC_DOMAIN_SOCKET].className =
+                "org/apache/hadoop/net/unix/DomainSocket";
+        cachedJavaClasses[JC_URI].className =
+                "java/net/URI";
+        cachedJavaClasses[JC_BYTE_BUFFER].className =
+                "java/nio/ByteBuffer";
+        cachedJavaClasses[JC_ENUM_SET].className =
+                "java/util/EnumSet";
+        cachedJavaClasses[JC_EXCEPTION_UTILS].className =
+                "org/apache/commons/lang3/exception/ExceptionUtils";
+        cachedJavaClasses[JC_CFUTURE].className =
+                "java/util/concurrent/CompletableFuture";
+
+        // Create and set the jclass objects based on the class names set above
+        jthrowable jthr;
+        int numCachedClasses =
+                sizeof(cachedJavaClasses) / sizeof(javaClassAndName);
+        for (int i = 0; i < numCachedClasses; i++) {
+            jthr = initCachedClass(env, cachedJavaClasses[i].className,
+                                   &cachedJavaClasses[i].javaClass);
+            if (jthr) {
+                mutexUnlock(&jclassInitMutex);
+                return jthr;
+            }
+        }
+        jclassesInitialized = 1;
+    }
+    mutexUnlock(&jclassInitMutex);
+    return NULL;
+}
+
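+/*
+ * Illustrative call site (mirrors getJNIEnv in jni_helper.c):
+ * initCachedClasses is invoked once the JVM and JNIEnv are available, and
+ * is effectively a no-op on subsequent calls:
+ *
+ *   jthrowable jthr = initCachedClasses(env);
+ *   if (jthr) {
+ *       printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+ *                             "initCachedClasses failed");
+ *   }
+ */
+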
+jclass getJclass(CachedJavaClass cachedJavaClass) {
+    return cachedJavaClasses[cachedJavaClass].javaClass;
+}
+
+const char *getClassName(CachedJavaClass cachedJavaClass) {
+    return cachedJavaClasses[cachedJavaClass].className;
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jclasses.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jclasses.h
new file mode 100644
index 0000000000000..0b174e1fecc56
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jclasses.h
@@ -0,0 +1,119 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LIBHDFS_JCLASSES_H
+#define LIBHDFS_JCLASSES_H
+
+#include <jni.h>
+
+/**
+ * Encapsulates logic to cache jclass objects so they can be re-used across
+ * calls to FindClass. Creating jclass objects every time libhdfs has to
+ * invoke a method can hurt performance. By caching jclass objects we avoid
+ * this overhead.
+ *
+ * We use the term "cached" here loosely; jclasses are not truly cached,
+ * instead they are created once during JVM load and are kept alive until the
+ * process shuts down. There is no eviction of jclass objects.
+ *
+ * @see https://www.ibm.com/developerworks/library/j-jni/index.html#notc
+ */
+
+/**
+ * Each enum value represents one jclass that is cached. Enum values should
+ * be passed to getJclass or getClassName to get the jclass object or class
+ * name represented by the enum value.
+ */
+typedef enum {
+    JC_CONFIGURATION,
+    JC_PATH,
+    JC_FILE_SYSTEM,
+    JC_FS_STATUS,
+    JC_FILE_UTIL,
+    JC_BLOCK_LOCATION,
+    JC_DFS_HEDGED_READ_METRICS,
+    JC_DISTRIBUTED_FILE_SYSTEM,
+    JC_FS_DATA_INPUT_STREAM,
+    JC_FS_DATA_OUTPUT_STREAM,
+    JC_FILE_STATUS,
+    JC_FS_PERMISSION,
+    JC_READ_STATISTICS,
+    JC_HDFS_DATA_INPUT_STREAM,
+    JC_FUTURE_DATA_IS_BUILDER,
+    JC_DOMAIN_SOCKET,
+    JC_URI,
+    JC_BYTE_BUFFER,
+    JC_ENUM_SET,
+    JC_EXCEPTION_UTILS,
+    JC_CFUTURE,
+    // A special marker enum that counts the number of cached jclasses
+    NUM_CACHED_CLASSES
+} CachedJavaClass;
+
+/**
+ * Internally initializes all jclass objects listed in the CachedJavaClass
+ * enum. This method is idempotent and thread-safe.
+ */
+jthrowable initCachedClasses(JNIEnv* env);
+
+/**
+ * Return the jclass object represented by the given CachedJavaClass
+ */
+jclass getJclass(CachedJavaClass cachedJavaClass);
+
+/**
+ * Return the class name represented by the given CachedJavaClass
+ */
+const char *getClassName(CachedJavaClass cachedJavaClass);
+
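+/*
+ * Illustrative example (mirrors usage in hdfs.c): invoking
+ * FileSystem#exists through the cached jclass instead of calling FindClass:
+ *
+ *   jvalue jVal;
+ *   jthrowable jthr = invokeMethod(env, &jVal, INSTANCE, jFS,
+ *           JC_FILE_SYSTEM, "exists",
+ *           JMETHOD1(JPARAM(HADOOP_PATH), "Z"), jPath);
+ */
+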
+/* Some frequently used HDFS class names */
+#define HADOOP_CONF     "org/apache/hadoop/conf/Configuration"
+#define HADOOP_PATH     "org/apache/hadoop/fs/Path"
+#define HADOOP_LOCALFS  "org/apache/hadoop/fs/LocalFileSystem"
+#define HADOOP_FS       "org/apache/hadoop/fs/FileSystem"
+#define HADOOP_FSSTATUS "org/apache/hadoop/fs/FsStatus"
+#define HADOOP_FILEUTIL "org/apache/hadoop/fs/FileUtil"
+#define HADOOP_BLK_LOC  "org/apache/hadoop/fs/BlockLocation"
+#define HADOOP_DFS_HRM  "org/apache/hadoop/hdfs/DFSHedgedReadMetrics"
+#define HADOOP_DFS      "org/apache/hadoop/hdfs/DistributedFileSystem"
+#define HADOOP_FSDISTRM "org/apache/hadoop/fs/FSDataInputStream"
+#define HADOOP_FSDOSTRM "org/apache/hadoop/fs/FSDataOutputStream"
+#define HADOOP_FILESTAT "org/apache/hadoop/fs/FileStatus"
+#define HADOOP_FSPERM   "org/apache/hadoop/fs/permission/FsPermission"
+#define HADOOP_RSTAT    "org/apache/hadoop/hdfs/ReadStatistics"
+#define HADOOP_HDISTRM  "org/apache/hadoop/hdfs/client/HdfsDataInputStream"
+#define HADOOP_FDISB    "org/apache/hadoop/fs/FutureDataInputStreamBuilder"
+#define HADOOP_FS_BLDR  "org/apache/hadoop/fs/FSBuilder"
+#define HADOOP_RO       "org/apache/hadoop/fs/ReadOption"
+#define HADOOP_DS       "org/apache/hadoop/net/unix/DomainSocket"
+
+/* Some frequently used Java class names */
+#define JAVA_NET_ISA    "java/net/InetSocketAddress"
+#define JAVA_NET_URI    "java/net/URI"
+#define JAVA_BYTEBUFFER "java/nio/ByteBuffer"
+#define JAVA_STRING     "java/lang/String"
+#define JAVA_ENUMSET    "java/util/EnumSet"
+#define JAVA_CFUTURE    "java/util/concurrent/CompletableFuture"
+#define JAVA_TIMEUNIT   "java/util/concurrent/TimeUnit"
+#define JAVA_OBJECT     "java/lang/Object"
+
+/* Some frequently used third-party class names */
+
+#define EXCEPTION_UTILS "org/apache/commons/lang3/exception/ExceptionUtils"
+
+#endif /*LIBHDFS_JCLASSES_H*/
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jni_helper.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jni_helper.c
index 91a3c1cafc8f4..ccc1e3f6b8f0b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jni_helper.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jni_helper.c
@@ -18,9 +18,9 @@
 
 #include "config.h"
 #include "exception.h"
+#include "jclasses.h"
 #include "jni_helper.h"
 #include "platform.h"
-#include "common/htable.h"
 #include "os/mutexes.h"
 #include "os/thread_local_storage.h"
 
@@ -29,8 +29,6 @@
 #include <stdio.h>
 #include <string.h>
 
-static struct htable *gClassRefHTable = NULL;
-
 /** The Native return types that methods could return */
 #define JVOID         'V'
 #define JOBJECT       'L'
@@ -44,13 +42,6 @@ static struct htable *gClassRefHTable = NULL;
 #define JFLOAT        'F'
 #define JDOUBLE       'D'
 
-
-/**
- * MAX_HASH_TABLE_ELEM: The maximum no. of entries in the hashtable.
- * It's set to 4096 to account for (classNames + No. of threads)
- */
-#define MAX_HASH_TABLE_ELEM 4096
-
 /**
  * Length of buffer for retrieving created JVMs.  (We only ever create one.)
  */
@@ -108,32 +99,27 @@ jthrowable newCStr(JNIEnv *env, jstring jstr, char **out)
     return NULL;
 }
 
-jthrowable invokeMethod(JNIEnv *env, jvalue *retval, MethType methType,
-                 jobject instObj, const char *className,
-                 const char *methName, const char *methSignature, ...)
+/**
+ * Does the work to actually execute a Java method. Takes in an existing jclass
+ * object and a va_list of arguments for the Java method to be invoked.
+ */
+static jthrowable invokeMethodOnJclass(JNIEnv *env, jvalue *retval,
+        MethType methType, jobject instObj, jclass cls, const char *className,
+        const char *methName, const char *methSignature, va_list args)
 {
-    va_list args;
-    jclass cls;
     jmethodID mid;
     jthrowable jthr;
-    const char *str; 
+    const char *str;
     char returnType;
-    
-    jthr = validateMethodType(env, methType);
-    if (jthr)
-        return jthr;
-    jthr = globalClassReference(className, env, &cls);
-    if (jthr)
-        return jthr;
-    jthr = methodIdFromClass(className, methName, methSignature, 
-                            methType, env, &mid);
+
+    jthr = methodIdFromClass(cls, className, methName, methSignature, methType,
+                             env, &mid);
     if (jthr)
         return jthr;
     str = methSignature;
     while (*str != ')') str++;
     str++;
     returnType = *str;
-    va_start(args, methSignature);
     if (returnType == JOBJECT || returnType == JARRAYOBJECT) {
         jobject jobj = NULL;
         if (methType == STATIC) {
@@ -192,7 +178,6 @@ jthrowable invokeMethod(JNIEnv *env, jvalue *retval, MethType methType,
         }
         retval->i = ji;
     }
-    va_end(args);
 
     jthr = (*env)->ExceptionOccurred(env);
     if (jthr) {
@@ -202,43 +187,115 @@ jthrowable invokeMethod(JNIEnv *env, jvalue *retval, MethType methType,
     return NULL;
 }
 
-jthrowable constructNewObjectOfClass(JNIEnv *env, jobject *out, const char *className, 
-                                  const char *ctorSignature, ...)
+jthrowable findClassAndInvokeMethod(JNIEnv *env, jvalue *retval,
+        MethType methType, jobject instObj, const char *className,
+        const char *methName, const char *methSignature, ...)
 {
+    jclass cls = NULL;
+    jthrowable jthr = NULL;
+
     va_list args;
-    jclass cls;
-    jmethodID mid; 
+    va_start(args, methSignature);
+
+    jthr = validateMethodType(env, methType);
+    if (jthr) {
+        goto done;
+    }
+
+    cls = (*env)->FindClass(env, className);
+    if (!cls) {
+        jthr = getPendingExceptionAndClear(env);
+        goto done;
+    }
+
+    jthr = invokeMethodOnJclass(env, retval, methType, instObj, cls,
+            className, methName, methSignature, args);
+
+done:
+    va_end(args);
+    destroyLocalReference(env, cls);
+    return jthr;
+}
+
+jthrowable invokeMethod(JNIEnv *env, jvalue *retval, MethType methType,
+        jobject instObj, CachedJavaClass class,
+        const char *methName, const char *methSignature, ...)
+{
+    jthrowable jthr;
+
+    va_list args;
+    va_start(args, methSignature);
+
+    jthr = invokeMethodOnJclass(env, retval, methType, instObj,
+            getJclass(class), getClassName(class), methName, methSignature,
+            args);
+
+    va_end(args);
+    return jthr;
+}
+
+static jthrowable constructNewObjectOfJclass(JNIEnv *env,
+        jobject *out, jclass cls, const char *className,
+                const char *ctorSignature, va_list args) {
+    jmethodID mid;
     jobject jobj;
     jthrowable jthr;
 
-    jthr = globalClassReference(className, env, &cls);
+    jthr = methodIdFromClass(cls, className, "", ctorSignature, INSTANCE,
+            env, &mid);
     if (jthr)
         return jthr;
-    jthr = methodIdFromClass(className, "", ctorSignature, 
-                            INSTANCE, env, &mid);
-    if (jthr)
-        return jthr;
-    va_start(args, ctorSignature);
     jobj = (*env)->NewObjectV(env, cls, mid, args);
-    va_end(args);
     if (!jobj)
         return getPendingExceptionAndClear(env);
     *out = jobj;
     return NULL;
 }
 
-
-jthrowable methodIdFromClass(const char *className, const char *methName, 
-                            const char *methSignature, MethType methType, 
-                            JNIEnv *env, jmethodID *out)
+jthrowable constructNewObjectOfClass(JNIEnv *env, jobject *out,
+        const char *className, const char *ctorSignature, ...)
 {
+    va_list args;
     jclass cls;
+    jthrowable jthr = NULL;
+
+    cls = (*env)->FindClass(env, className);
+    if (!cls) {
+        jthr = getPendingExceptionAndClear(env);
+        goto done;
+    }
+
+    va_start(args, ctorSignature);
+    jthr = constructNewObjectOfJclass(env, out, cls, className,
+            ctorSignature, args);
+    va_end(args);
+done:
+    destroyLocalReference(env, cls);
+    return jthr;
+}
+
+jthrowable constructNewObjectOfCachedClass(JNIEnv *env, jobject *out,
+        CachedJavaClass cachedJavaClass, const char *ctorSignature, ...)
+{
+    jthrowable jthr = NULL;
+    va_list args;
+    va_start(args, ctorSignature);
+
+    jthr = constructNewObjectOfJclass(env, out,
+            getJclass(cachedJavaClass), getClassName(cachedJavaClass),
+            ctorSignature, args);
+
+    va_end(args);
+    return jthr;
+}
+
+jthrowable methodIdFromClass(jclass cls, const char *className,
+        const char *methName, const char *methSignature, MethType methType,
+        JNIEnv *env, jmethodID *out)
+{
     jthrowable jthr;
     jmethodID mid = 0;
 
-    jthr = globalClassReference(className, env, &cls);
-    if (jthr)
-        return jthr;
     jthr = validateMethodType(env, methType);
     if (jthr)
         return jthr;
@@ -257,54 +314,6 @@ jthrowable methodIdFromClass(const char *className, const char *methName,
     return NULL;
 }
 
-jthrowable globalClassReference(const char *className, JNIEnv *env, jclass *out)
-{
-    jthrowable jthr = NULL;
-    jclass local_clazz = NULL;
-    jclass clazz = NULL;
-    int ret;
-
-    mutexLock(&hdfsHashMutex);
-    if (!gClassRefHTable) {
-        gClassRefHTable = htable_alloc(MAX_HASH_TABLE_ELEM, ht_hash_string,
-            ht_compare_string);
-        if (!gClassRefHTable) {
-            jthr = newRuntimeError(env, "htable_alloc failed\n");
-            goto done;
-        }
-    }
-    clazz = htable_get(gClassRefHTable, className);
-    if (clazz) {
-        *out = clazz;
-        goto done;
-    }
-    local_clazz = (*env)->FindClass(env,className);
-    if (!local_clazz) {
-        jthr = getPendingExceptionAndClear(env);
-        goto done;
-    }
-    clazz = (*env)->NewGlobalRef(env, local_clazz);
-    if (!clazz) {
-        jthr = getPendingExceptionAndClear(env);
-        goto done;
-    }
-    ret = htable_put(gClassRefHTable, (void*)className, clazz);
-    if (ret) {
-        jthr = newRuntimeError(env, "htable_put failed with error "
-                               "code %d\n", ret);
-        goto done;
-    }
-    *out = clazz;
-    jthr = NULL;
-done:
-    mutexUnlock(&hdfsHashMutex);
-    (*env)->DeleteLocalRef(env, local_clazz);
-    if (jthr && clazz) {
-        (*env)->DeleteGlobalRef(env, clazz);
-    }
-    return jthr;
-}
-
 jthrowable classNameOfObject(jobject jobj, JNIEnv *env, char **name)
 {
     jthrowable jthr;
@@ -358,7 +367,6 @@ jthrowable classNameOfObject(jobject jobj, JNIEnv *env, char **name)
     return jthr;
 }
 
-
 /**
  * For the given path, expand it by filling in with all *.jar or *.JAR files,
  * separated by PATH_SEPARATOR. Assumes that expanded is big enough to hold the
@@ -731,14 +739,17 @@ static JNIEnv* getGlobalJNIEnv(void)
                     "with error: %d\n", rv);
             return NULL;
         }
-        jthr = invokeMethod(env, NULL, STATIC, NULL,
-                         "org/apache/hadoop/fs/FileSystem",
-                         "loadFileSystems", "()V");
+
+        // We use findClassAndInvokeMethod here because the jclasses in
+        // jclasses.h have not loaded yet
+        jthr = findClassAndInvokeMethod(env, NULL, STATIC, NULL, HADOOP_FS,
+                "loadFileSystems", "()V");
         if (jthr) {
-            printExceptionAndFree(env, jthr, PRINT_EXC_ALL, "loadFileSystems");
+            printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                    "FileSystem: loadFileSystems failed");
+            return NULL;
         }
-    }
-    else {
+    } else {
         //Attach this thread to the VM
         vm = vmBuf[0];
         rv = (*vm)->AttachCurrentThread(vm, (void*)&env, 0);
@@ -809,6 +820,15 @@ JNIEnv* getJNIEnv(void)
 
     state->env = getGlobalJNIEnv();
     mutexUnlock(&jvmMutex);
     if (!state->env) {
       goto fail;
     }
+
+    jthrowable jthr = NULL;
+    jthr = initCachedClasses(state->env);
+    if (jthr) {
+      printExceptionAndFree(state->env, jthr, PRINT_EXC_ALL,
+                            "initCachedClasses failed");
+      goto fail;
+    }
@@ -898,8 +918,7 @@ jthrowable hadoopConfSetStr(JNIEnv *env, jobject jConfiguration,
     if (jthr)
         goto done;
     jthr = invokeMethod(env, NULL, INSTANCE, jConfiguration,
-            "org/apache/hadoop/conf/Configuration", "set", 
-            "(Ljava/lang/String;Ljava/lang/String;)V",
+            JC_CONFIGURATION, "set", "(Ljava/lang/String;Ljava/lang/String;)V",
             jkey, jvalue);
     if (jthr)
         goto done;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jni_helper.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jni_helper.h
index f0d06d72fc040..41d6fab2a75ae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jni_helper.h
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jni_helper.h
@@ -19,6 +19,8 @@
 #ifndef LIBHDFS_JNI_HELPER_H
 #define LIBHDFS_JNI_HELPER_H
 
+#include "jclasses.h"
+
 #include <jni.h>
 #include <stdio.h>
 
@@ -36,7 +38,6 @@
 
 // #define _LIBHDFS_JNI_HELPER_DEBUGGING_ON_
 
-
 /** Denote the method we want to invoke as STATIC or INSTANCE */
 typedef enum {
     STATIC,
@@ -74,12 +75,12 @@ jthrowable newJavaStr(JNIEnv *env, const char *str, jstring *out);
 void destroyLocalReference(JNIEnv *env, jobject jObject);
 
 /** invokeMethod: Invoke a Static or Instance method.
- * className: Name of the class where the method can be found
  * methName: Name of the method
  * methSignature: the signature of the method "(arg-types)ret-type"
  * methType: The type of the method (STATIC or INSTANCE)
  * instObj: Required if the methType is INSTANCE. The object to invoke
    the method on.
+ * class: The CachedJavaClass to call the method on.
  * env: The JNIEnv pointer
  * retval: The pointer to a union type which will contain the result of the
    method invocation, e.g. if the method returns an Object, retval will be
@@ -91,17 +92,33 @@ void destroyLocalReference(JNIEnv *env, jobject jObject);
    a valid exception reference, and the result stored at retval is undefined.
  */
 jthrowable invokeMethod(JNIEnv *env, jvalue *retval, MethType methType,
-                 jobject instObj, const char *className, const char *methName, 
-                 const char *methSignature, ...);
+        jobject instObj, CachedJavaClass class,
+        const char *methName, const char *methSignature, ...);
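+
+/*
+ * Example (mirrors hadoopConfSetStr in jni_helper.c): set a key/value pair
+ * on a Configuration object via the cached JC_CONFIGURATION class:
+ *
+ *   jthr = invokeMethod(env, NULL, INSTANCE, jConfiguration,
+ *           JC_CONFIGURATION, "set",
+ *           "(Ljava/lang/String;Ljava/lang/String;)V", jkey, jvalue);
+ */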
 
-jthrowable constructNewObjectOfClass(JNIEnv *env, jobject *out, const char *className, 
-                                  const char *ctorSignature, ...);
+/**
+ * findClassAndInvokeMethod: Same as invokeMethod, but it calls FindClass on
+ * the given className first and then calls invokeMethod. This method exists
+ * mainly for test infrastructure; any production code should use
+ * invokeMethod. Calling FindClass repeatedly can introduce performance
+ * overhead, so users should prefer invokeMethod and supply a CachedJavaClass.
+ */
+jthrowable findClassAndInvokeMethod(JNIEnv *env, jvalue *retval,
+        MethType methType, jobject instObj, const char *className,
+        const char *methName, const char *methSignature, ...);
 
-jthrowable methodIdFromClass(const char *className, const char *methName, 
-                            const char *methSignature, MethType methType, 
-                            JNIEnv *env, jmethodID *out);
+jthrowable constructNewObjectOfClass(JNIEnv *env, jobject *out,
+        const char *className, const char *ctorSignature, ...);
+
+/**
+ * Same as constructNewObjectOfClass but it takes in a CachedJavaClass
+ * rather than a className. This avoids an extra call to FindClass.
+ */
+jthrowable constructNewObjectOfCachedClass(JNIEnv *env, jobject *out,
+        CachedJavaClass cachedJavaClass, const char *ctorSignature, ...);
 
-jthrowable globalClassReference(const char *className, JNIEnv *env, jclass *out);
+jthrowable methodIdFromClass(jclass cls, const char *className,
+        const char *methName, const char *methSignature, MethType methType,
+        JNIEnv *env, jmethodID *out);
 
 /** classNameOfObject: Get an object's class name.
  * @param jobj: The object.
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/mutexes.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/mutexes.h
index da30bf4974f77..92afabd7c75c6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/mutexes.h
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/mutexes.h
@@ -30,12 +30,12 @@
 
 #include "platform.h"
 
-/** Mutex protecting the class reference hash table. */
-extern mutex hdfsHashMutex;
-
 /** Mutex protecting singleton JVM instance. */
 extern mutex jvmMutex;
 
+/** Mutex protecting initialization of jclasses in jclasses.h. */
+extern mutex jclassInitMutex;
+
 /**
  * Locks a mutex.
  *
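
With the class-reference hash table gone, its mutex goes too; jclassInitMutex only has to serialize the one-time creation of the cached class references. A minimal sketch of that locking pattern, assuming an initCachedClasses() entry point and a gClassesInitialized flag (both names are illustrative):

    #include <jni.h>
    #include "os/mutexes.h"

    static int gClassesInitialized = 0;

    /* One-time, thread-safe initialization guarded by jclassInitMutex. */
    jthrowable initCachedClasses(JNIEnv *env) {
        jthrowable jthr = NULL;
        mutexLock(&jclassInitMutex);
        if (!gClassesInitialized) {
            /* FindClass + NewGlobalRef for each CachedJavaClass here. */
            gClassesInitialized = 1;
        }
        mutexUnlock(&jclassInitMutex);
        return jthr;
    }
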
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/posix/mutexes.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/posix/mutexes.c
index 20dafaa020b99..5c6b429d5ec03 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/posix/mutexes.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/posix/mutexes.c
@@ -21,8 +21,8 @@
 #include <pthread.h>
 #include <stdlib.h>
 
-mutex hdfsHashMutex = PTHREAD_MUTEX_INITIALIZER;
 mutex jvmMutex;
+mutex jclassInitMutex = PTHREAD_MUTEX_INITIALIZER;
 pthread_mutexattr_t jvmMutexAttr;
 
 __attribute__((constructor)) static void init() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/posix/thread_local_storage.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/posix/thread_local_storage.c
index 110c71a855853..a55dc35f2b296 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/posix/thread_local_storage.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/posix/thread_local_storage.c
@@ -23,12 +23,20 @@
 #include <stdio.h>
 #include <stdlib.h>
 
+#include "exception.h"
+#include "jni_helper.h"
+
+#define UNKNOWN "UNKNOWN"
+#define MAXTHRID 256
+
 /** Key that allows us to retrieve thread-local storage */
 static pthread_key_t gTlsKey;
 
 /** nonzero if we succeeded in initializing gTlsKey. Protected by the jvmMutex */
 static int gTlsKeyInitialized = 0;
 
+static void get_current_thread_id(JNIEnv* env, char* id, int max);
+
 /**
  * The function that is called whenever a thread with libhdfs thread local data
  * is destroyed.
@@ -41,16 +49,35 @@ void hdfsThreadDestructor(void *v)
   struct ThreadLocalState *state = (struct ThreadLocalState*)v;
   JNIEnv *env = state->env;
   jint ret;
+  jthrowable jthr;
+  char thr_name[MAXTHRID];
 
   /* Detach the current thread from the JVM */
-  if ((env != NULL) && (*env != NULL)) {
+  if (env) {
     ret = (*env)->GetJavaVM(env, &vm);
-    if (ret) {
+
+    if (ret != 0) {
       fprintf(stderr, "hdfsThreadDestructor: GetJavaVM failed with error %d\n",
         ret);
-      (*env)->ExceptionDescribe(env);
+      jthr = (*env)->ExceptionOccurred(env);
+      if (jthr) {
+        (*env)->ExceptionDescribe(env);
+        (*env)->ExceptionClear(env);
+      }
     } else {
-      (*vm)->DetachCurrentThread(vm);
+      ret = (*vm)->DetachCurrentThread(vm);
+
+      if (ret != JNI_OK) {
+        jthr = (*env)->ExceptionOccurred(env);
+        if (jthr) {
+          (*env)->ExceptionDescribe(env);
+          (*env)->ExceptionClear(env);
+        }
+        get_current_thread_id(env, thr_name, MAXTHRID);
+
+        fprintf(stderr, "hdfsThreadDestructor: Unable to detach thread %s "
+            "from the JVM. Error code: %d\n", thr_name, ret);
+      }
     }
   }
 
@@ -62,13 +89,73 @@ void hdfsThreadDestructor(void *v)
   free(state);
 }
 
+static void get_current_thread_id(JNIEnv* env, char* id, int max) {
+  jvalue jVal;
+  jobject thr = NULL;
+  jstring thr_name = NULL;
+  jlong thr_id = 0;
+  jthrowable jthr = NULL;
+  const char *thr_name_str;
+
+  jthr = findClassAndInvokeMethod(env, &jVal, STATIC, NULL, "java/lang/Thread",
+          "currentThread", "()Ljava/lang/Thread;");
+  if (jthr) {
+    snprintf(id, max, "%s", UNKNOWN);
+    printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+            "get_current_thread_id: Thread#currentThread failed: ");
+    goto done;
+  }
+  thr = jVal.l;
+
+  jthr = findClassAndInvokeMethod(env, &jVal, INSTANCE, thr,
+          "java/lang/Thread", "getId", "()J");
+  if (jthr) {
+    snprintf(id, max, "%s", UNKNOWN);
+    printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+            "get_current_thread_id: Thread#getId failed: ");
+    goto done;
+  }
+  thr_id = jVal.j;
+
+  jthr = findClassAndInvokeMethod(env, &jVal, INSTANCE, thr,
+          "java/lang/Thread", "toString", "()Ljava/lang/String;");
+  if (jthr) {
+    snprintf(id, max, "%s:%ld", UNKNOWN, thr_id);
+    printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+            "get_current_thread_id: Thread#toString failed: ");
+    goto done;
+  }
+  thr_name = jVal.l;
+
+  thr_name_str = (*env)->GetStringUTFChars(env, thr_name, NULL);
+  if (!thr_name_str) {
+    printPendingExceptionAndFree(env, PRINT_EXC_ALL,
+            "get_current_thread_id: GetStringUTFChars failed: ");
+    snprintf(id, max, "%s:%ld", UNKNOWN, thr_id);
+    goto done;
+  }
+
+  // Treating the jlong as a long *should* be safe
+  snprintf(id, max, "%s:%ld", thr_name_str, thr_id);
+
+  // Release the char*
+  (*env)->ReleaseStringUTFChars(env, thr_name, thr_name_str);
+
+done:
+  destroyLocalReference(env, thr);
+  destroyLocalReference(env, thr_name);
+
+  // Make sure the id is null-terminated even if the name was truncated
+  id[max - 1] = '\0';
+}
+
 struct ThreadLocalState* threadLocalStorageCreate()
 {
   struct ThreadLocalState *state;
   state = (struct ThreadLocalState*)malloc(sizeof(struct ThreadLocalState));
   if (state == NULL) {
     fprintf(stderr,
-      "threadLocalStorageSet: OOM - Unable to allocate thread local state\n");
+      "threadLocalStorageCreate: OOM - Unable to allocate thread local state\n");
     return NULL;
   }
   state->lastExceptionStackTrace = NULL;
@@ -103,4 +190,4 @@ int threadLocalStorageSet(struct ThreadLocalState *state)
     hdfsThreadDestructor(state);
   }
   return ret;
-}
+}
\ No newline at end of file
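
get_current_thread_id formats a jlong with %ld, which matches on LP64 POSIX targets but not on 32-bit ones, hence the hedging comment in the code above. A width-safe variant, sketched here with inttypes.h rather than taken from the patch, and relying on snprintf's guarantee to null-terminate whenever max > 0:

    #include <inttypes.h>
    #include <stdio.h>

    /* Sketch: format "name:id" without assuming sizeof(long) == 8.
     * JNI defines jlong as a 64-bit integer, so the int64_t cast is safe. */
    static void format_thread_id(char *id, size_t max, const char *name,
                                 int64_t thr_id) {
        snprintf(id, max, "%s:%" PRId64, name, thr_id);
    }
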
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/windows/mutexes.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/windows/mutexes.c
index 875f03386a817..ac7f9fd35b8af 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/windows/mutexes.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/windows/mutexes.c
@@ -20,8 +20,8 @@
 
 #include 
 
-mutex hdfsHashMutex;
 mutex jvmMutex;
+mutex jclassInitMutex;
 
 /**
  * Unfortunately, there is no simple static initializer for a critical section.
@@ -34,8 +34,8 @@ mutex jvmMutex;
  * http://msdn.microsoft.com/en-us/library/bb918180.aspx
  */
 static void __cdecl initializeMutexes(void) {
-  InitializeCriticalSection(&hdfsHashMutex);
   InitializeCriticalSection(&jvmMutex);
+  InitializeCriticalSection(&jclassInitMutex);
 }
 #pragma section(".CRT$XCU", read)
 __declspec(allocate(".CRT$XCU"))
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMakeLists.txt
index 2da5b6bbe52e3..f64eec10a8b98 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMakeLists.txt
@@ -48,6 +48,7 @@ find_package(GSasl)
 find_package(Threads)
 
 include(CheckCXXSourceCompiles)
+include(CheckSymbolExists)
 
 # Check if thread_local is supported
 unset (THREAD_LOCAL_SUPPORTED CACHE)
@@ -141,6 +142,11 @@ else (NOT NO_SASL)
     message(STATUS "Compiling with NO SASL SUPPORT")
 endif (NOT NO_SASL)
 
+check_symbol_exists(explicit_bzero "string.h" HAVE_EXPLICIT_BZERO)
+if(HAVE_EXPLICIT_BZERO)
+    add_definitions(-DHAVE_EXPLICIT_BZERO)
+endif()
+
 add_definitions(-DASIO_STANDALONE -DASIO_CPP11_DATE_TIME)
 
 # Disable optimizations if compiling debug
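
The new check_symbol_exists probe defines HAVE_EXPLICIT_BZERO when string.h provides explicit_bzero, which, unlike plain bzero, is specified not to be elided by dead-store optimization. The call sites below then select between the two at compile time; condensed into a helper for illustration (the secure_clear name is not part of the patch):

    #include <string.h>   /* explicit_bzero, where available */
    #include <strings.h>  /* bzero */

    /* Mirrors the HAVE_EXPLICIT_BZERO guard used in hdfs.cc and the tests. */
    static void secure_clear(void *buf, size_t len) {
    #ifdef HAVE_EXPLICIT_BZERO
        explicit_bzero(buf, len);  /* survives dead-store elimination */
    #else
        bzero(buf, len);
    #endif
    }
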
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/hdfs.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/hdfs.cc
index 6b2468fd5dbdc..549da93c2aa89 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/hdfs.cc
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/hdfs.cc
@@ -1402,7 +1402,11 @@ int hdfsGetBlockLocations(hdfsFS fs, const char *path, struct hdfsBlockLocations
     hdfsBlockLocations *locations = new struct hdfsBlockLocations();
     (*locations_out) = locations;
 
+#ifdef HAVE_EXPLICIT_BZERO
+    explicit_bzero(locations, sizeof(*locations));
+#else
     bzero(locations, sizeof(*locations));
+#endif
     locations->fileLength = ppLocations->getFileLength();
     locations->isLastBlockComplete = ppLocations->isLastBlockComplete();
     locations->isUnderConstruction = ppLocations->isUnderConstruction();
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_ext_test.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_ext_test.cc
index f364d0e15a978..fba82b817ecb4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_ext_test.cc
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_ext_test.cc
@@ -475,7 +475,11 @@ TEST_F(HdfsExtTest, TestReadStats) {
   hdfsFile file = hdfsOpenFile(fs, path.c_str(), O_WRONLY, 0, 0, 0);
   EXPECT_NE(nullptr, file);
   void * buf = malloc(size);
+#ifdef HAVE_EXPLICIT_BZERO
+  explicit_bzero(buf, size);
+#else
   bzero(buf, size);
+#endif
   EXPECT_EQ(size, hdfsWrite(fs, file, buf, size));
   free(buf);
   EXPECT_EQ(0, hdfsCloseFile(fs, file));
@@ -503,7 +507,10 @@ TEST_F(HdfsExtTest, TestReadStats) {
   hdfsFileFreeReadStatistics(stats);
 
   EXPECT_EQ(0, hdfsCloseFile(fs, file));
-  EXPECT_EQ(0, errno);
+  // Since libhdfs is not guaranteed to set errno to 0 on successful
+  // operations, we disable this check for now; see HDFS-14325 for a
+  // long-term solution to this problem
+  // EXPECT_EQ(0, errno);
 }
 
 //Testing working directory
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_shim.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_shim.c
index 54d4cf651eb9e..2d265b8f03c0c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_shim.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_shim.c
@@ -250,6 +250,65 @@ hdfsFile hdfsOpenFile(hdfsFS fs, const char* path, int flags,
   return ret;
 }
 
+hdfsOpenFileBuilder *hdfsOpenFileBuilderAlloc(hdfsFS fs,
+        const char *path) {
+  return libhdfs_hdfsOpenFileBuilderAlloc(fs->libhdfsRep, path);
+}
+
+hdfsOpenFileBuilder *hdfsOpenFileBuilderMust(
+        hdfsOpenFileBuilder *builder, const char *key,
+        const char *value) {
+  return libhdfs_hdfsOpenFileBuilderMust(builder, key, value);
+}
+
+hdfsOpenFileBuilder *hdfsOpenFileBuilderOpt(
+        hdfsOpenFileBuilder *builder, const char *key,
+        const char *value) {
+  return libhdfs_hdfsOpenFileBuilderOpt(builder, key, value);
+}
+
+hdfsOpenFileFuture *hdfsOpenFileBuilderBuild(
+        hdfsOpenFileBuilder *builder) {
+  return libhdfs_hdfsOpenFileBuilderBuild(builder);
+}
+
+void hdfsOpenFileBuilderFree(hdfsOpenFileBuilder *builder) {
+  libhdfs_hdfsOpenFileBuilderFree(builder);
+}
+
+hdfsFile hdfsOpenFileFutureGet(hdfsOpenFileFuture *future) {
+  hdfsFile ret = calloc(1, sizeof(struct hdfsFile_internal));
+  ret->libhdfsppRep = 0;
+  ret->libhdfsRep = libhdfs_hdfsOpenFileFutureGet(future);
+  if (!ret->libhdfsRep) {
+    free(ret);
+    ret = NULL;
+  }
+  return ret;
+}
+
+hdfsFile hdfsOpenFileFutureGetWithTimeout(hdfsOpenFileFuture *future,
+        int64_t timeout, javaConcurrentTimeUnit timeUnit) {
+  hdfsFile ret = calloc(1, sizeof(struct hdfsFile_internal));
+  ret->libhdfsppRep = 0;
+  ret->libhdfsRep = libhdfs_hdfsOpenFileFutureGetWithTimeout(future, timeout,
+                                                             timeUnit);
+  if (!ret->libhdfsRep) {
+    free(ret);
+    ret = NULL;
+  }
+  return ret;
+}
+
+int hdfsOpenFileFutureCancel(hdfsOpenFileFuture *future,
+        int mayInterruptIfRunning) {
+  return libhdfs_hdfsOpenFileFutureCancel(future, mayInterruptIfRunning);
+}
+
+void hdfsOpenFileFutureFree(hdfsOpenFileFuture *future) {
+  libhdfs_hdfsOpenFileFutureFree(future);
+}
+
 int hdfsTruncateFile(hdfsFS fs, const char* path, tOffset newlength) {
   return libhdfs_hdfsTruncateFile(fs->libhdfsRep, path, newlength);
 }
@@ -317,6 +376,12 @@ tSize hdfsPread(hdfsFS fs, hdfsFile file, tOffset position,
   return ret;
 }
 
+int hdfsPreadFully(hdfsFS fs, hdfsFile file, tOffset position,
+                void* buffer, tSize length) {
+  return libhdfs_hdfsPreadFully(fs->libhdfsRep, file->libhdfsRep, position,
+          buffer, length);
+}
+
 tSize hdfsWrite(hdfsFS fs, hdfsFile file, const void* buffer,
                 tSize length) {
   return libhdfs_hdfsWrite(fs->libhdfsRep, file->libhdfsRep, buffer, length);
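
The shim forwards the builder/future open path and hdfsPreadFully straight through to libhdfs, wrapping returned files in hdfsFile_internal. For orientation, a hedged end-to-end usage sketch of that API surface, assuming a connected hdfsFS handle, an existing path, that hdfsPreadFully returns 0 on success, and that Build releases the builder (all assumptions here, not statements of the committed contract):

    #include "hdfs/hdfs.h"  /* assumed include path for the libhdfs header */

    static int readHeader(hdfsFS fs, char *buf, tSize len) {
        hdfsOpenFileBuilder *builder =
                hdfsOpenFileBuilderAlloc(fs, "/test/file");
        hdfsOpenFileFuture *future = hdfsOpenFileBuilderBuild(builder);

        /* Wait up to 10 seconds for the asynchronous open to complete. */
        hdfsFile file = hdfsOpenFileFutureGetWithTimeout(future, 10, jSeconds);
        hdfsOpenFileFutureFree(future);
        if (file == NULL) {
            return -1;
        }

        /* Positioned read that fills the whole buffer or fails. */
        int ret = hdfsPreadFully(fs, file, 0, buf, len);
        hdfsCloseFile(fs, file);
        return ret;
    }
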
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfspp_mini_dfs.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfspp_mini_dfs.h
index aecced1a8b6e5..320a958b10c0b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfspp_mini_dfs.h
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfspp_mini_dfs.h
@@ -92,7 +92,11 @@ class HdfsHandle {
     hdfsFile file = hdfsOpenFile(*this, path.c_str(), O_WRONLY, 0, 0, 0);
     EXPECT_NE(nullptr, file);
     void * buf = malloc(size);
+#ifdef HAVE_EXPLICIT_BZERO
+    explicit_bzero(buf, size);
+#else
     bzero(buf, size);
+#endif
     EXPECT_EQ(1024, hdfsWrite(*this, file, buf, size));
     EXPECT_EQ(0, hdfsCloseFile(*this, file));
     free(buf);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_wrapper_defines.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_wrapper_defines.h
index b90776893f6b8..165744142558a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_wrapper_defines.h
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_wrapper_defines.h
@@ -39,6 +39,23 @@
 #define hdfsConfStrFree libhdfs_hdfsConfStrFree
 #define hdfsDisconnect libhdfs_hdfsDisconnect
 #define hdfsOpenFile libhdfs_hdfsOpenFile
+#define hdfsOpenFileBuilderAlloc libhdfs_hdfsOpenFileBuilderAlloc
+#define hdfsOpenFileBuilderMust libhdfs_hdfsOpenFileBuilderMust
+#define hdfsOpenFileBuilderOpt libhdfs_hdfsOpenFileBuilderOpt
+#define hdfsOpenFileBuilderBuild libhdfs_hdfsOpenFileBuilderBuild
+#define hdfsOpenFileBuilderFree libhdfs_hdfsOpenFileBuilderFree
+#define hdfsOpenFileFutureGet libhdfs_hdfsOpenFileFutureGet
+#define javaConcurrentTimeUnit libhdfs_javaConcurrentTimeUnit
+#define jNanoseconds libhdfs_jNanoseconds
+#define jMicroseconds libhdfs_jMicroseconds
+#define jMilliseconds libhdfs_jMilliseconds
+#define jSeconds libhdfs_jSeconds
+#define jMinutes libhdfs_jMinutes
+#define jHours libhdfs_jHours
+#define jDays libhdfs_jDays
+#define hdfsOpenFileFutureGetWithTimeout libhdfs_hdfsOpenFileFutureGetWithTimeout
+#define hdfsOpenFileFutureCancel libhdfs_hdfsOpenFileFutureCancel
+#define hdfsOpenFileFutureFree libhdfs_hdfsOpenFileFutureFree
 #define hdfsTruncateFile libhdfs_hdfsTruncateFile
 #define hdfsUnbufferFile libhdfs_hdfsUnbufferFile
 #define hdfsCloseFile libhdfs_hdfsCloseFile
@@ -47,6 +64,7 @@
 #define hdfsTell libhdfs_hdfsTell
 #define hdfsRead libhdfs_hdfsRead
 #define hdfsPread libhdfs_hdfsPread
+#define hdfsPreadFully libhdfs_hdfsPreadFully
 #define hdfsWrite libhdfs_hdfsWrite
 #define hdfsFlush libhdfs_hdfsFlush
 #define hdfsHFlush libhdfs_hdfsHFlush
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_wrapper_undefs.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_wrapper_undefs.h
index fce0e823ddeb8..d84b8ba287525 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_wrapper_undefs.h
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_wrapper_undefs.h
@@ -39,6 +39,23 @@
 #undef hdfsConfStrFree
 #undef hdfsDisconnect
 #undef hdfsOpenFile
+#undef hdfsOpenFileBuilderAlloc
+#undef hdfsOpenFileBuilderMust
+#undef hdfsOpenFileBuilderOpt
+#undef hdfsOpenFileBuilderBuild
+#undef hdfsOpenFileBuilderFree
+#undef hdfsOpenFileFutureGet
+#undef javaConcurrentTimeUnit
+#undef jNanoseconds
+#undef jMicroseconds
+#undef jMilliseconds
+#undef jSeconds
+#undef jMinutes
+#undef jHours
+#undef jDays
+#undef hdfsOpenFileFutureGetWithTimeout
+#undef hdfsOpenFileFutureCancel
+#undef hdfsOpenFileFutureFree
 #undef hdfsTruncateFile
 #undef hdfsUnbufferFile
 #undef hdfsCloseFile
@@ -47,6 +64,7 @@
 #undef hdfsTell
 #undef hdfsRead
 #undef hdfsPread
+#undef hdfsPreadFully
 #undef hdfsWrite
 #undef hdfsFlush
 #undef hdfsHFlush
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfspp_wrapper_defines.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfspp_wrapper_defines.h
index d0411c2126c88..0a6d987409fec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfspp_wrapper_defines.h
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfspp_wrapper_defines.h
@@ -39,6 +39,23 @@
 #define hdfsConfStrFree libhdfspp_hdfsConfStrFree
 #define hdfsDisconnect libhdfspp_hdfsDisconnect
 #define hdfsOpenFile libhdfspp_hdfsOpenFile
+#define hdfsOpenFileBuilderAlloc libhdfspp_hdfsOpenFileBuilderAlloc
+#define hdfsOpenFileBuilderMust libhdfspp_hdfsOpenFileBuilderMust
+#define hdfsOpenFileBuilderOpt libhdfspp_hdfsOpenFileBuilderOpt
+#define hdfsOpenFileBuilderBuild libhdfspp_hdfsOpenFileBuilderBuild
+#define hdfsOpenFileBuilderFree libhdfspp_hdfsOpenFileBuilderFree
+#define hdfsOpenFileFutureGet libhdfspp_hdfsOpenFileFutureGet
+#define javaConcurrentTimeUnit libhdfspp_javaConcurrentTimeUnit
+#define jNanoseconds libhdfspp_jNanoseconds
+#define jMicroseconds libhdfspp_jMicroseconds
+#define jMilliseconds libhdfspp_jMilliseconds
+#define jSeconds libhdfspp_jSeconds
+#define jMinutes libhdfspp_jMinutes
+#define jHours libhdfspp_jHours
+#define jDays libhdfspp_jDays
+#define hdfsOpenFileFutureGetWithTimeout libhdfspp_hdfsOpenFileFutureGetWithTimeout
+#define hdfsOpenFileFutureCancel libhdfspp_hdfsOpenFileFutureCancel
+#define hdfsOpenFileFutureFree libhdfspp_hdfsOpenFileFutureFree
 #define hdfsTruncateFile libhdfspp_hdfsTruncateFile
 #define hdfsUnbufferFile libhdfspp_hdfsUnbufferFile
 #define hdfsCloseFile libhdfspp_hdfsCloseFile
@@ -47,6 +64,7 @@
 #define hdfsTell libhdfspp_hdfsTell
 #define hdfsRead libhdfspp_hdfsRead
 #define hdfsPread libhdfspp_hdfsPread
+#define hdfsPreadFully libhdfspp_hdfsPreadFully
 #define hdfsWrite libhdfspp_hdfsWrite
 #define hdfsFlush libhdfspp_hdfsFlush
 #define hdfsHFlush libhdfspp_hdfsHFlush
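
These define/undef header pairs implement the renaming trick that lets both client libraries coexist in one test binary: each library's public header is included behind a prefix, and the shim then re-exports the unprefixed names. Roughly, as seen from a shim translation unit (the header names are approximations; the real shim includes its own wrapper headers):

    /* Compile-time renaming sketch. */
    #include "libhdfs_wrapper_defines.h"   /* hdfsPread -> libhdfs_hdfsPread */
    #include "hdfs/hdfs.h"                 /* now declares libhdfs_-prefixed APIs */
    #include "libhdfs_wrapper_undefs.h"    /* restore the plain names */

    #include "libhdfspp_wrapper_defines.h" /* hdfsPread -> libhdfspp_hdfsPread */
    #include "hdfspp/hdfs_ext.h"           /* declares libhdfspp_-prefixed APIs */
    #include "libhdfspp_wrapper_undefs.h"

    /* hdfs_shim.c can then define the unprefixed entry points and dispatch
     * to either implementation, as the hdfsPreadFully shim above does. */
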
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteBufferPread.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteBufferPread.java
new file mode 100644
index 0000000000000..0692f5a12893c
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteBufferPread.java
@@ -0,0 +1,290 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import java.util.Random;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * This class tests the DFS positional read functionality on a single-node
+ * mini-cluster. These tests are inspired by {@link TestPread}. They are
+ * much less comprehensive than the other pread tests because pread already
+ * uses {@link ByteBuffer}s internally.
+ */
+public class TestByteBufferPread {
+
+  private static MiniDFSCluster cluster;
+  private static FileSystem fs;
+  private static byte[] fileContents;
+  private static Path testFile;
+  private static Random rand;
+
+  private static final long SEED = 0xDEADBEEFL;
+  private static final int BLOCK_SIZE = 4096;
+  private static final int FILE_SIZE = 12 * BLOCK_SIZE;
+
+  @BeforeClass
+  public static void setup() throws IOException {
+    // Set up the cluster with a small block size so we can create small
+    // files that span multiple blocks
+    Configuration conf = new Configuration();
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+    fs = cluster.getFileSystem();
+
+    // Create a test file that spans 12 blocks and fill it with random
+    // bytes
+    fileContents = new byte[FILE_SIZE];
+    rand = new Random(SEED);
+    rand.nextBytes(fileContents);
+    testFile = new Path("/byte-buffer-pread-test.dat");
+    try (FSDataOutputStream out = fs.create(testFile, (short) 3)) {
+      out.write(fileContents);
+    }
+  }
+
+  /**
+   * Test preads with heap {@link ByteBuffer}s.
+   */
+  @Test
+  public void testPreadWithHeapByteBuffer() throws IOException {
+    testPreadWithByteBuffer(ByteBuffer.allocate(FILE_SIZE));
+    testPreadWithFullByteBuffer(ByteBuffer.allocate(FILE_SIZE));
+    testPreadWithPositionedByteBuffer(ByteBuffer.allocate(FILE_SIZE));
+    testPreadWithLimitedByteBuffer(ByteBuffer.allocate(FILE_SIZE));
+    testPositionedPreadWithByteBuffer(ByteBuffer.allocate(FILE_SIZE));
+    testPreadFullyWithByteBuffer(ByteBuffer.allocate(FILE_SIZE));
+  }
+
+  /**
+   * Test preads with direct {@link ByteBuffer}s.
+   */
+  @Test
+  public void testPreadWithDirectByteBuffer() throws IOException {
+    testPreadWithByteBuffer(ByteBuffer.allocateDirect(FILE_SIZE));
+    testPreadWithFullByteBuffer(ByteBuffer.allocateDirect(FILE_SIZE));
+    testPreadWithPositionedByteBuffer(ByteBuffer.allocateDirect(FILE_SIZE));
+    testPreadWithLimitedByteBuffer(ByteBuffer.allocateDirect(FILE_SIZE));
+    testPositionedPreadWithByteBuffer(ByteBuffer.allocateDirect(FILE_SIZE));
+    testPreadFullyWithByteBuffer(ByteBuffer.allocateDirect(FILE_SIZE));
+  }
+
+  /**
+   * Reads the entire testFile using the pread API and validates that its
+   * contents are properly loaded into the supplied {@link ByteBuffer}.
+   */
+  private void testPreadWithByteBuffer(ByteBuffer buffer) throws IOException {
+    int bytesRead;
+    int totalBytesRead = 0;
+    try (FSDataInputStream in = fs.open(testFile)) {
+      while ((bytesRead = in.read(totalBytesRead, buffer)) > 0) {
+        totalBytesRead += bytesRead;
+        // Check that each call to read changes the position of the ByteBuffer
+        // correctly
+        assertEquals(totalBytesRead, buffer.position());
+      }
+
+      // Make sure the buffer is full
+      assertFalse(buffer.hasRemaining());
+      // Make sure the contents of the read buffer equal the contents of the
+      // file
+      buffer.position(0);
+      byte[] bufferContents = new byte[FILE_SIZE];
+      buffer.get(bufferContents);
+      assertArrayEquals(bufferContents, fileContents);
+    }
+  }
+
+  /**
+   * Attempts to read the testFile into a {@link ByteBuffer} that is already
+   * full, and validates that doing so does not change the contents of the
+   * supplied {@link ByteBuffer}.
+   */
+  private void testPreadWithFullByteBuffer(ByteBuffer buffer)
+          throws IOException {
+    // Load some dummy data into the buffer
+    byte[] existingBufferBytes = new byte[FILE_SIZE];
+    rand.nextBytes(existingBufferBytes);
+    buffer.put(existingBufferBytes);
+    // Make sure the buffer is full
+    assertFalse(buffer.hasRemaining());
+
+    try (FSDataInputStream in = fs.open(testFile)) {
+      // Attempt to read into the buffer; 0 bytes should be read since the
+      // buffer is full
+      assertEquals(0, in.read(buffer));
+
+      // Double check the buffer is still full and its contents have not
+      // changed
+      assertFalse(buffer.hasRemaining());
+      buffer.position(0);
+      byte[] bufferContents = new byte[FILE_SIZE];
+      buffer.get(bufferContents);
+      assertArrayEquals(bufferContents, existingBufferBytes);
+    }
+  }
+
+  /**
+   * Reads half of the testFile into the {@link ByteBuffer} by setting a
+   * {@link ByteBuffer#limit()} on the buffer. Validates that only half of the
+   * testFile is loaded into the buffer.
+   */
+  private void testPreadWithLimitedByteBuffer(
+          ByteBuffer buffer) throws IOException {
+    int bytesRead;
+    int totalBytesRead = 0;
+    // Set the buffer limit to half the size of the file
+    buffer.limit(FILE_SIZE / 2);
+
+    try (FSDataInputStream in = fs.open(testFile)) {
+      while ((bytesRead = in.read(totalBytesRead, buffer)) > 0) {
+        totalBytesRead += bytesRead;
+        // Check that each call to read changes the position of the ByteBuffer
+        // correctly
+        assertEquals(totalBytesRead, buffer.position());
+      }
+
+      // Since we set the buffer limit to half the size of the file, we should
+      // have only read half of the file into the buffer
+      assertEquals(totalBytesRead, FILE_SIZE / 2);
+      // Check that the buffer is full and the contents equal the first half of
+      // the file
+      assertFalse(buffer.hasRemaining());
+      buffer.position(0);
+      byte[] bufferContents = new byte[FILE_SIZE / 2];
+      buffer.get(bufferContents);
+      assertArrayEquals(bufferContents,
+              Arrays.copyOfRange(fileContents, 0, FILE_SIZE / 2));
+    }
+  }
+
+  /**
+   * Reads half of the testFile into the {@link ByteBuffer} by setting the
+   * {@link ByteBuffer#position()} to half the size of the file. Validates
+   * that only half of the testFile is loaded into the buffer.
+   */
+  private void testPreadWithPositionedByteBuffer(
+          ByteBuffer buffer) throws IOException {
+    int bytesRead;
+    int totalBytesRead = 0;
+    // Set the buffer position to half the size of the file
+    buffer.position(FILE_SIZE / 2);
+
+    try (FSDataInputStream in = fs.open(testFile)) {
+      while ((bytesRead = in.read(totalBytesRead, buffer)) > 0) {
+        totalBytesRead += bytesRead;
+        // Check that each call to read changes the position of the ByteBuffer
+        // correctly
+        assertEquals(totalBytesRead + FILE_SIZE / 2, buffer.position());
+      }
+
+      // Since we set the buffer position to half the size of the file, we
+      // should have only read half of the file into the buffer
+      assertEquals(totalBytesRead, FILE_SIZE / 2);
+      // Check that the buffer is full and the contents equal the first half of
+      // the file
+      assertFalse(buffer.hasRemaining());
+      buffer.position(FILE_SIZE / 2);
+      byte[] bufferContents = new byte[FILE_SIZE / 2];
+      buffer.get(bufferContents);
+      assertArrayEquals(bufferContents,
+              Arrays.copyOfRange(fileContents, 0, FILE_SIZE / 2));
+    }
+  }
+
+  /**
+   * Reads half of the testFile into the {@link ByteBuffer} by specifying a
+   * position for the pread API that is half of the file size. Validates that
+   * only half of the testFile is loaded into the buffer.
+   */
+  private void testPositionedPreadWithByteBuffer(
+          ByteBuffer buffer) throws IOException {
+    int bytesRead;
+    int totalBytesRead = 0;
+
+    try (FSDataInputStream in = fs.open(testFile)) {
+      // Start reading from halfway through the file
+      while ((bytesRead = in.read(totalBytesRead + FILE_SIZE / 2,
+              buffer)) > 0) {
+        totalBytesRead += bytesRead;
+        // Check that each call to read changes the position of the ByteBuffer
+        // correctly
+        assertEquals(totalBytesRead, buffer.position());
+      }
+
+      // Since we started reading halfway through the file, the buffer
+      // should only be half full
+      assertEquals(totalBytesRead, FILE_SIZE / 2);
+      assertEquals(buffer.position(), FILE_SIZE / 2);
+      assertTrue(buffer.hasRemaining());
+      // Check that the buffer contents equal the second half of the file
+      buffer.position(0);
+      byte[] bufferContents = new byte[FILE_SIZE / 2];
+      buffer.get(bufferContents);
+      assertArrayEquals(bufferContents,
+              Arrays.copyOfRange(fileContents, FILE_SIZE / 2, FILE_SIZE));
+    }
+  }
+
+  /**
+   * Reads the entire testFile using the preadFully API and validates that its
+   * contents are properly loaded into the supplied {@link ByteBuffer}.
+   */
+  private void testPreadFullyWithByteBuffer(ByteBuffer buffer)
+          throws IOException {
+    int totalBytesRead = 0;
+    try (FSDataInputStream in = fs.open(testFile)) {
+      in.readFully(totalBytesRead, buffer);
+      // Make sure the buffer is full
+      assertFalse(buffer.hasRemaining());
+      // Make sure the contents of the read buffer equal the contents of the
+      // file
+      buffer.position(0);
+      byte[] bufferContents = new byte[FILE_SIZE];
+      buffer.get(bufferContents);
+      assertArrayEquals(bufferContents, fileContents);
+    }
+  }
+
+  @AfterClass
+  public static void shutdown() throws IOException {
+    try {
+      fs.delete(testFile, false);
+      fs.close();
+    } finally {
+      cluster.shutdown(true);
+    }
+  }
+}
\ No newline at end of file