diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
index a2273bf83343b..ccde54ed2942c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
@@ -33,18 +33,7 @@
import com.google.common.base.Preconditions;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.ByteBufferReadable;
-import org.apache.hadoop.fs.CanSetDropBehind;
-import org.apache.hadoop.fs.CanSetReadahead;
-import org.apache.hadoop.fs.CanUnbuffer;
-import org.apache.hadoop.fs.FSExceptionMessages;
-import org.apache.hadoop.fs.HasEnhancedByteBufferAccess;
-import org.apache.hadoop.fs.HasFileDescriptor;
-import org.apache.hadoop.fs.PositionedReadable;
-import org.apache.hadoop.fs.ReadOption;
-import org.apache.hadoop.fs.Seekable;
-import org.apache.hadoop.fs.StreamCapabilities;
-import org.apache.hadoop.fs.StreamCapabilitiesPolicy;
+import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.ByteBufferPool;
import org.apache.hadoop.util.StringUtils;
@@ -64,7 +53,7 @@
public class CryptoInputStream extends FilterInputStream implements
Seekable, PositionedReadable, ByteBufferReadable, HasFileDescriptor,
CanSetDropBehind, CanSetReadahead, HasEnhancedByteBufferAccess,
- ReadableByteChannel, CanUnbuffer, StreamCapabilities {
+    ReadableByteChannel, CanUnbuffer, StreamCapabilities {
private final byte[] oneByteBuf = new byte[1];
private final CryptoCodec codec;
private final Decryptor decryptor;
@@ -328,20 +317,40 @@ public int read(long position, byte[] buffer, int offset, int length)
throws IOException {
checkStream();
try {
- final int n = ((PositionedReadable) in).read(position, buffer, offset,
+ final int n = ((PositionedReadable) in).read(position, buffer, offset,
length);
if (n > 0) {
// This operation does not change the current offset of the file
decrypt(position, buffer, offset, n);
}
-
+
return n;
} catch (ClassCastException e) {
throw new UnsupportedOperationException("This stream does not support " +
"positioned read.");
}
}
-
+
+ /**
+ * Positioned readFully using {@link ByteBuffer}s. This method is thread-safe.
+ */
+ // @Override
+ public void readFully(long position, final ByteBuffer buf)
+ throws IOException {
+ checkStream();
+ if (!(in instanceof ByteBufferPositionedReadable)) {
+ throw new UnsupportedOperationException(in.getClass().getCanonicalName()
+ + " does not support positioned reads with byte buffers.");
+ }
+ int bufPos = buf.position();
+ ((ByteBufferPositionedReadable) in).readFully(position, buf);
+ final int n = buf.position() - bufPos;
+ if (n > 0) {
+ // This operation does not change the current offset of the file
+ decrypt(position, buf, n, bufPos);
+ }
+ }
+
/**
* Decrypt length bytes in buffer starting at offset. Output is also put
* into buffer starting at offset. It is thread-safe.
@@ -375,7 +384,7 @@ private void decrypt(long position, byte[] buffer, int offset, int length)
returnDecryptor(decryptor);
}
}
-
+
/** Positioned read fully. It is thread-safe */
@Override
public void readFully(long position, byte[] buffer, int offset, int length)
@@ -407,7 +416,7 @@ public void seek(long pos) throws IOException {
checkStream();
try {
/*
- * If data of target pos in the underlying stream has already been read
+ * If data of target pos in the underlying stream has already been read
* and decrypted in outBuffer, we just need to re-position outBuffer.
*/
if (pos <= streamOffset && pos >= (streamOffset - outBuffer.remaining())) {
@@ -523,7 +532,7 @@ public int read(ByteBuffer buf) throws IOException {
* Output is also buf and same start position.
* buf.position() and buf.limit() should be unchanged after decryption.
*/
- private void decrypt(ByteBuffer buf, int n, int start)
+ private void decrypt(ByteBuffer buf, int n, int start)
throws IOException {
final int pos = buf.position();
final int limit = buf.limit();
@@ -545,6 +554,51 @@ private void decrypt(ByteBuffer buf, int n, int start)
}
buf.position(pos);
}
+
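+  /**
+   * Decrypt {@code length} bytes in {@code buf} starting at {@code start},
+   * where those bytes were read from position {@code filePosition} in the
+   * underlying stream. The decrypted output overwrites the same region of
+   * {@code buf}; the caller's position and limit are left untouched because
+   * the method operates on a duplicate of the buffer.
+   */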
+ private void decrypt(long filePosition, ByteBuffer buf, int length, int start)
+ throws IOException {
+ ByteBuffer localInBuffer = null;
+ ByteBuffer localOutBuffer = null;
+
+ // Duplicate the buffer so we don't have to worry about resetting the
+ // original position and limit at the end of the method
+ buf = buf.duplicate();
+
+ int decryptedBytes = 0;
+ Decryptor localDecryptor = null;
+ try {
+ localInBuffer = getBuffer();
+ localOutBuffer = getBuffer();
+ localDecryptor = getDecryptor();
+ byte[] localIV = initIV.clone();
+ updateDecryptor(localDecryptor, filePosition, localIV);
+ byte localPadding = getPadding(filePosition);
+      // Set proper position for the input data.
+ localInBuffer.position(localPadding);
+
+ while (decryptedBytes < length) {
+ buf.position(start + decryptedBytes);
+ buf.limit(start + decryptedBytes +
+ Math.min(length - decryptedBytes, localInBuffer.remaining()));
+ localInBuffer.put(buf);
+ // Do decryption
+ try {
+ decrypt(localDecryptor, localInBuffer, localOutBuffer, localPadding);
+ buf.position(start + decryptedBytes);
+ buf.limit(start + length);
+ decryptedBytes += localOutBuffer.remaining();
+ buf.put(localOutBuffer);
+ } finally {
+ localPadding = afterDecryption(localDecryptor, localInBuffer,
+ filePosition + length, localIV);
+ }
+ }
+ } finally {
+ returnBuffer(localInBuffer);
+ returnBuffer(localOutBuffer);
+ returnDecryptor(localDecryptor);
+ }
+ }
@Override
public int available() throws IOException {
@@ -605,7 +659,7 @@ public ByteBuffer read(ByteBufferPool bufferPool, int maxLength,
}
return buffer;
} catch (ClassCastException e) {
- throw new UnsupportedOperationException("This stream does not support " +
+ throw new UnsupportedOperationException("This stream does not support " +
"enhanced byte buffer access.");
}
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ByteBufferPositionedReadable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ByteBufferPositionedReadable.java
new file mode 100644
index 0000000000000..f8282d88c46c3
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ByteBufferPositionedReadable.java
@@ -0,0 +1,90 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.io.EOFException;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Implementers of this interface provide a positioned read API that writes to a
+ * {@link ByteBuffer} rather than a {@code byte[]}.
+ *
+ * @see PositionedReadable
+ * @see ByteBufferReadable
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public interface ByteBufferPositionedReadable {
+ /**
+ * Reads up to {@code buf.remaining()} bytes into buf from a given position
+ * in the file and returns the number of bytes read. Callers should use
+ * {@code buf.limit(...)} to control the size of the desired read and
+ * {@code buf.position(...)} to control the offset into the buffer the data
+ * should be written to.
+ *
+ * After a successful call, {@code buf.position()} will be advanced by the
+ * number of bytes read and {@code buf.limit()} will be unchanged.
+ *
+ * In the case of an exception, the state of the buffer (the contents of the
+ * buffer, the {@code buf.position()}, the {@code buf.limit()}, etc.) is
+ * undefined, and callers should be prepared to recover from this
+ * eventuality.
+ *
+ * Callers should use {@link StreamCapabilities#hasCapability(String)} with
+ * {@link StreamCapabilities#PREADBYTEBUFFER} to check if the underlying
+ * stream supports this interface, otherwise they might get a
+ * {@link UnsupportedOperationException}.
+ *
+ * Implementations should treat 0-length requests as legitimate, and must not
+ * signal an error upon their receipt.
+ *
+ * This does not change the current offset of a file, and is thread-safe.
+ *
+ * @param position position within file
+ * @param buf the ByteBuffer to receive the results of the read operation.
+ * @return the number of bytes read, possibly zero, or -1 if reached
+ * end-of-stream
+ * @throws IOException if there is some error performing the read
+ */
+ int read(long position, ByteBuffer buf) throws IOException;
+
+ /**
+   * Reads {@code buf.remaining()} bytes into buf from a given position in
+   * the file. Callers should use {@code buf.limit(...)} to control the size
+   * of the desired read and {@code buf.position(...)} to control the offset
+   * into the buffer the data should be written to.
+ *
+   * This operation provides similar semantics to
+   * {@link #read(long, ByteBuffer)}; the difference is that this method is
+   * guaranteed to fill the {@link ByteBuffer}, failing with an
+   * {@link EOFException} if the end of the data is reached first.
+ *
+ * @param position position within file
+ * @param buf the ByteBuffer to receive the results of the read operation.
+ * @throws IOException if there is some error performing the read
+   * @throws EOFException if the end of the data was reached before the
+   *           read operation completed
+ * @see #read(long, ByteBuffer)
+ */
+ void readFully(long position, ByteBuffer buf) throws IOException;
+}
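
For reference, a minimal caller-side sketch of the pattern described in the javadoc above. This is not part of the patch: the class name ByteBufferPreadExample and the helper preadIntoBuffer are illustrative, and the fallback branch simply reuses the existing byte[]-based PositionedReadable pread.

    import java.io.IOException;
    import java.nio.ByteBuffer;

    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.StreamCapabilities;

    // Illustrative helper, not part of this patch.
    final class ByteBufferPreadExample {
      static int preadIntoBuffer(FSDataInputStream in, long position,
          ByteBuffer buf) throws IOException {
        // Prefer the new ByteBuffer pread path when the wrapped stream
        // advertises it; the stream's own offset is never moved.
        if (in.hasCapability(StreamCapabilities.PREADBYTEBUFFER)) {
          return in.read(position, buf);
        }
        // Fallback: classic byte[]-based positioned read, copied into buf.
        byte[] tmp = new byte[buf.remaining()];
        int n = in.read(position, tmp, 0, tmp.length);
        if (n > 0) {
          buf.put(tmp, 0, n);
        }
        return n;
      }
    }
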
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java
index 08d71f16c0783..a09b0e917cde8 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java
@@ -38,23 +38,24 @@
public class FSDataInputStream extends DataInputStream
implements Seekable, PositionedReadable,
ByteBufferReadable, HasFileDescriptor, CanSetDropBehind, CanSetReadahead,
- HasEnhancedByteBufferAccess, CanUnbuffer, StreamCapabilities {
+ HasEnhancedByteBufferAccess, CanUnbuffer, StreamCapabilities,
+ ByteBufferPositionedReadable {
/**
- * Map ByteBuffers that we have handed out to readers to ByteBufferPool
+ * Map ByteBuffers that we have handed out to readers to ByteBufferPool
* objects
*/
private final IdentityHashStore<ByteBuffer, ByteBufferPool>
-    extendedReadBuffers
-    = new IdentityHashStore<ByteBuffer, ByteBufferPool>(0);
+      extendedReadBuffers
+      = new IdentityHashStore<ByteBuffer, ByteBufferPool>(0);
public FSDataInputStream(InputStream in) {
super(in);
- if( !(in instanceof Seekable) || !(in instanceof PositionedReadable) ) {
- throw new IllegalArgumentException(
- "In is not an instance of Seekable or PositionedReadable");
+ if (!(in instanceof Seekable) || !(in instanceof PositionedReadable)) {
+ throw new IllegalArgumentException(in.getClass().getCanonicalName() +
+ " is not an instance of Seekable or PositionedReadable");
}
}
-
+
/**
* Seek to the given offset.
*
@@ -62,7 +63,7 @@ public FSDataInputStream(InputStream in) {
*/
@Override
public void seek(long desired) throws IOException {
- ((Seekable)in).seek(desired);
+ ((Seekable) in).seek(desired);
}
/**
@@ -72,65 +73,65 @@ public void seek(long desired) throws IOException {
*/
@Override
public long getPos() throws IOException {
- return ((Seekable)in).getPos();
+ return ((Seekable) in).getPos();
}
-
+
/**
* Read bytes from the given position in the stream to the given buffer.
*
- * @param position position in the input stream to seek
- * @param buffer buffer into which data is read
- * @param offset offset into the buffer in which data is written
- * @param length maximum number of bytes to read
+ * @param position position in the input stream to seek
+ * @param buffer buffer into which data is read
+ * @param offset offset into the buffer in which data is written
+ * @param length maximum number of bytes to read
* @return total number of bytes read into the buffer, or -1
- * if there is no more data because the end of the stream has been
- * reached
+ * if there is no more data because the end of the stream has been
+ * reached
*/
@Override
public int read(long position, byte[] buffer, int offset, int length)
- throws IOException {
- return ((PositionedReadable)in).read(position, buffer, offset, length);
+ throws IOException {
+ return ((PositionedReadable) in).read(position, buffer, offset, length);
}
/**
* Read bytes from the given position in the stream to the given buffer.
* Continues to read until length bytes have been read.
*
- * @param position position in the input stream to seek
- * @param buffer buffer into which data is read
- * @param offset offset into the buffer in which data is written
- * @param length the number of bytes to read
- * @throws IOException IO problems
+ * @param position position in the input stream to seek
+ * @param buffer buffer into which data is read
+ * @param offset offset into the buffer in which data is written
+ * @param length the number of bytes to read
+ * @throws IOException IO problems
* @throws EOFException If the end of stream is reached while reading.
* If an exception is thrown an undetermined number
- * of bytes in the buffer may have been written.
+ * of bytes in the buffer may have been written.
*/
@Override
public void readFully(long position, byte[] buffer, int offset, int length)
- throws IOException {
- ((PositionedReadable)in).readFully(position, buffer, offset, length);
+ throws IOException {
+ ((PositionedReadable) in).readFully(position, buffer, offset, length);
}
-
+
/**
* See {@link #readFully(long, byte[], int, int)}.
*/
@Override
public void readFully(long position, byte[] buffer)
- throws IOException {
- ((PositionedReadable)in).readFully(position, buffer, 0, buffer.length);
+ throws IOException {
+ ((PositionedReadable) in).readFully(position, buffer, 0, buffer.length);
}
-
+
/**
* Seek to the given position on an alternate copy of the data.
*
- * @param targetPos position to seek to
+ * @param targetPos position to seek to
* @return true if a new source is found, false otherwise
*/
@Override
public boolean seekToNewSource(long targetPos) throws IOException {
- return ((Seekable)in).seekToNewSource(targetPos);
+ return ((Seekable) in).seekToNewSource(targetPos);
}
-
+
/**
* Get a reference to the wrapped input stream. Used by unit tests.
*
@@ -144,10 +145,11 @@ public InputStream getWrappedStream() {
@Override
public int read(ByteBuffer buf) throws IOException {
if (in instanceof ByteBufferReadable) {
- return ((ByteBufferReadable)in).read(buf);
+ return ((ByteBufferReadable) in).read(buf);
}
- throw new UnsupportedOperationException("Byte-buffer read unsupported by input stream");
+ throw new UnsupportedOperationException("Byte-buffer read unsupported " +
+ "by " + in.getClass().getCanonicalName());
}
@Override
@@ -163,38 +165,36 @@ public FileDescriptor getFileDescriptor() throws IOException {
@Override
public void setReadahead(Long readahead)
- throws IOException, UnsupportedOperationException {
+ throws IOException, UnsupportedOperationException {
try {
- ((CanSetReadahead)in).setReadahead(readahead);
+ ((CanSetReadahead) in).setReadahead(readahead);
} catch (ClassCastException e) {
- throw new UnsupportedOperationException(
- "this stream does not support setting the readahead " +
- "caching strategy.");
+ throw new UnsupportedOperationException(in.getClass().getCanonicalName() +
+ " does not support setting the readahead caching strategy.");
}
}
@Override
public void setDropBehind(Boolean dropBehind)
- throws IOException, UnsupportedOperationException {
+ throws IOException, UnsupportedOperationException {
try {
- ((CanSetDropBehind)in).setDropBehind(dropBehind);
+ ((CanSetDropBehind) in).setDropBehind(dropBehind);
} catch (ClassCastException e) {
throw new UnsupportedOperationException("this stream does not " +
- "support setting the drop-behind caching setting.");
+ "support setting the drop-behind caching setting.");
}
}
@Override
public ByteBuffer read(ByteBufferPool bufferPool, int maxLength,
-      EnumSet<ReadOption> opts)
+      EnumSet<ReadOption> opts)
throws IOException, UnsupportedOperationException {
try {
- return ((HasEnhancedByteBufferAccess)in).read(bufferPool,
- maxLength, opts);
- }
- catch (ClassCastException e) {
+ return ((HasEnhancedByteBufferAccess) in).read(bufferPool,
+ maxLength, opts);
+ } catch (ClassCastException e) {
ByteBuffer buffer = ByteBufferUtil.
- fallbackRead(this, bufferPool, maxLength);
+ fallbackRead(this, bufferPool, maxLength);
if (buffer != null) {
extendedReadBuffers.put(buffer, bufferPool);
}
@@ -203,23 +203,22 @@ public ByteBuffer read(ByteBufferPool bufferPool, int maxLength,
}
private static final EnumSet<ReadOption> EMPTY_READ_OPTIONS_SET =
- EnumSet.noneOf(ReadOption.class);
+ EnumSet.noneOf(ReadOption.class);
final public ByteBuffer read(ByteBufferPool bufferPool, int maxLength)
throws IOException, UnsupportedOperationException {
return read(bufferPool, maxLength, EMPTY_READ_OPTIONS_SET);
}
-
+
@Override
public void releaseBuffer(ByteBuffer buffer) {
try {
- ((HasEnhancedByteBufferAccess)in).releaseBuffer(buffer);
- }
- catch (ClassCastException e) {
- ByteBufferPool bufferPool = extendedReadBuffers.remove( buffer);
+ ((HasEnhancedByteBufferAccess) in).releaseBuffer(buffer);
+ } catch (ClassCastException e) {
+ ByteBufferPool bufferPool = extendedReadBuffers.remove(buffer);
if (bufferPool == null) {
throw new IllegalArgumentException("tried to release a buffer " +
- "that was not created by this stream.");
+ "that was not created by this stream.");
}
bufferPool.putBuffer(buffer);
}
@@ -240,10 +239,30 @@ public boolean hasCapability(String capability) {
/**
* String value. Includes the string value of the inner stream
+ *
* @return the stream
*/
@Override
public String toString() {
return super.toString() + ": " + in;
}
+
+ @Override
+ public int read(long position, ByteBuffer buf) throws IOException {
+ if (in instanceof ByteBufferPositionedReadable) {
+ return ((ByteBufferPositionedReadable) in).read(position, buf);
+ }
+ throw new UnsupportedOperationException("Byte-buffer pread unsupported " +
+ "by " + in.getClass().getCanonicalName());
+ }
+
+ @Override
+ public void readFully(long position, ByteBuffer buf) throws IOException {
+ if (in instanceof ByteBufferPositionedReadable) {
+ ((ByteBufferPositionedReadable) in).readFully(position, buf);
+ } else {
+ throw new UnsupportedOperationException("Byte-buffer pread " +
+ "unsupported by " + in.getClass().getCanonicalName());
+ }
+ }
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StreamCapabilities.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StreamCapabilities.java
index 3549cdc4fa392..e68e7b351ed78 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StreamCapabilities.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StreamCapabilities.java
@@ -59,6 +59,18 @@ public interface StreamCapabilities {
*/
String UNBUFFER = "in:unbuffer";
+ /**
+ * Stream read(ByteBuffer) capability implemented by
+ * {@link ByteBufferReadable#read(java.nio.ByteBuffer)}.
+ */
+ String READBYTEBUFFER = "in:readbytebuffer";
+
+ /**
+ * Stream read(long, ByteBuffer) capability implemented by
+ * {@link ByteBufferPositionedReadable#read(long, java.nio.ByteBuffer)}.
+ */
+ String PREADBYTEBUFFER = "in:preadbytebuffer";
+
/**
* Capabilities that a stream can support and be queried for.
*/
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsMapping.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsMapping.c
index 402ffd5bb20a6..b463679fcdb6f 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsMapping.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsMapping.c
@@ -199,8 +199,5 @@ Java_org_apache_hadoop_security_JniBasedUnixGroupsMapping_getGroupsForUser
if (ginfo) {
hadoop_group_info_free(ginfo);
}
- if (jgroupname) {
- (*env)->DeleteLocalRef(env, jgroupname);
- }
return jgroups;
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/CryptoStreamsTestBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/CryptoStreamsTestBase.java
index a0eb105833809..498add4629dc5 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/CryptoStreamsTestBase.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/CryptoStreamsTestBase.java
@@ -26,7 +26,9 @@
import java.util.EnumSet;
import java.util.Random;
+import org.apache.hadoop.fs.ByteBufferPositionedReadable;
import org.apache.hadoop.fs.ByteBufferReadable;
import org.apache.hadoop.fs.CanUnbuffer;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FSExceptionMessages;
@@ -129,6 +131,32 @@ private void preadCheck(PositionedReadable in) throws Exception {
Assert.assertArrayEquals(result, expectedData);
}
+ private int byteBufferPreadAll(ByteBufferPositionedReadable in,
+ ByteBuffer buf) throws IOException {
+ int n = 0;
+ int total = 0;
+ while (n != -1) {
+ total += n;
+ if (!buf.hasRemaining()) {
+ break;
+ }
+ n = in.read(total, buf);
+ }
+
+ return total;
+ }
+
+ private void byteBufferPreadCheck(ByteBufferPositionedReadable in)
+ throws Exception {
+ ByteBuffer result = ByteBuffer.allocate(dataLen);
+ int n = byteBufferPreadAll(in, result);
+
+ Assert.assertEquals(dataLen, n);
+ ByteBuffer expectedData = ByteBuffer.allocate(n);
+ expectedData.put(data, 0, n);
+ Assert.assertArrayEquals(result.array(), expectedData.array());
+ }
+
protected OutputStream getOutputStream(int bufferSize) throws IOException {
return getOutputStream(bufferSize, key, iv);
}
@@ -288,20 +316,36 @@ private int readAll(InputStream in, long pos, byte[] b, int off, int len)
return total;
}
+
+ private int readAll(InputStream in, long pos, ByteBuffer buf)
+ throws IOException {
+ int n = 0;
+ int total = 0;
+ while (n != -1) {
+ total += n;
+ if (!buf.hasRemaining()) {
+ break;
+ }
+ n = ((ByteBufferPositionedReadable) in).read(pos + total, buf);
+ }
+
+ return total;
+ }
/** Test positioned read. */
@Test(timeout=120000)
public void testPositionedRead() throws Exception {
- OutputStream out = getOutputStream(defaultBufferSize);
- writeData(out);
+ try (OutputStream out = getOutputStream(defaultBufferSize)) {
+ writeData(out);
+ }
- InputStream in = getInputStream(defaultBufferSize);
- // Pos: 1/3 dataLen
- positionedReadCheck(in , dataLen / 3);
+ try (InputStream in = getInputStream(defaultBufferSize)) {
+ // Pos: 1/3 dataLen
+ positionedReadCheck(in, dataLen / 3);
- // Pos: 1/2 dataLen
- positionedReadCheck(in, dataLen / 2);
- in.close();
+ // Pos: 1/2 dataLen
+ positionedReadCheck(in, dataLen / 2);
+ }
}
private void positionedReadCheck(InputStream in, int pos) throws Exception {
@@ -315,43 +359,71 @@ private void positionedReadCheck(InputStream in, int pos) throws Exception {
System.arraycopy(data, pos, expectedData, 0, n);
Assert.assertArrayEquals(readData, expectedData);
}
+
+ /** Test positioned read with ByteBuffers. */
+ @Test(timeout=120000)
+ public void testPositionedReadWithByteBuffer() throws Exception {
+ try (OutputStream out = getOutputStream(defaultBufferSize)) {
+ writeData(out);
+ }
+
+ try (InputStream in = getInputStream(defaultBufferSize)) {
+ // Pos: 1/3 dataLen
+ positionedReadCheckWithByteBuffer(in, dataLen / 3);
+
+ // Pos: 1/2 dataLen
+ positionedReadCheckWithByteBuffer(in, dataLen / 2);
+ }
+ }
+
+ private void positionedReadCheckWithByteBuffer(InputStream in, int pos)
+ throws Exception {
+ ByteBuffer result = ByteBuffer.allocate(dataLen);
+ int n = readAll(in, pos, result);
+
+ Assert.assertEquals(dataLen, n + pos);
+ byte[] readData = new byte[n];
+ System.arraycopy(result.array(), 0, readData, 0, n);
+ byte[] expectedData = new byte[n];
+ System.arraycopy(data, pos, expectedData, 0, n);
+ Assert.assertArrayEquals(readData, expectedData);
+ }
- /** Test read fully */
+ /** Test read fully. */
@Test(timeout=120000)
public void testReadFully() throws Exception {
OutputStream out = getOutputStream(defaultBufferSize);
writeData(out);
- InputStream in = getInputStream(defaultBufferSize);
- final int len1 = dataLen / 4;
- // Read len1 bytes
- byte[] readData = new byte[len1];
- readAll(in, readData, 0, len1);
- byte[] expectedData = new byte[len1];
- System.arraycopy(data, 0, expectedData, 0, len1);
- Assert.assertArrayEquals(readData, expectedData);
-
- // Pos: 1/3 dataLen
- readFullyCheck(in, dataLen / 3);
-
- // Read len1 bytes
- readData = new byte[len1];
- readAll(in, readData, 0, len1);
- expectedData = new byte[len1];
- System.arraycopy(data, len1, expectedData, 0, len1);
- Assert.assertArrayEquals(readData, expectedData);
-
- // Pos: 1/2 dataLen
- readFullyCheck(in, dataLen / 2);
-
- // Read len1 bytes
- readData = new byte[len1];
- readAll(in, readData, 0, len1);
- expectedData = new byte[len1];
- System.arraycopy(data, 2 * len1, expectedData, 0, len1);
- Assert.assertArrayEquals(readData, expectedData);
-
- in.close();
+ try (InputStream in = getInputStream(defaultBufferSize)) {
+ final int len1 = dataLen / 4;
+ // Read len1 bytes
+ byte[] readData = new byte[len1];
+ readAll(in, readData, 0, len1);
+ byte[] expectedData = new byte[len1];
+ System.arraycopy(data, 0, expectedData, 0, len1);
+ Assert.assertArrayEquals(readData, expectedData);
+
+ // Pos: 1/3 dataLen
+ readFullyCheck(in, dataLen / 3);
+
+ // Read len1 bytes
+ readData = new byte[len1];
+ readAll(in, readData, 0, len1);
+ expectedData = new byte[len1];
+ System.arraycopy(data, len1, expectedData, 0, len1);
+ Assert.assertArrayEquals(readData, expectedData);
+
+ // Pos: 1/2 dataLen
+ readFullyCheck(in, dataLen / 2);
+
+ // Read len1 bytes
+ readData = new byte[len1];
+ readAll(in, readData, 0, len1);
+ expectedData = new byte[len1];
+ System.arraycopy(data, 2 * len1, expectedData, 0, len1);
+ Assert.assertArrayEquals(readData, expectedData);
+ }
}
private void readFullyCheck(InputStream in, int pos) throws Exception {
@@ -369,6 +441,60 @@ private void readFullyCheck(InputStream in, int pos) throws Exception {
} catch (EOFException e) {
}
}
+
+  /** Test byte buffer read fully. */
+ @Test(timeout=120000)
+ public void testByteBufferReadFully() throws Exception {
+ OutputStream out = getOutputStream(defaultBufferSize);
+ writeData(out);
+
+ try (InputStream in = getInputStream(defaultBufferSize)) {
+ final int len1 = dataLen / 4;
+ // Read len1 bytes
+ byte[] readData = new byte[len1];
+ readAll(in, readData, 0, len1);
+ byte[] expectedData = new byte[len1];
+ System.arraycopy(data, 0, expectedData, 0, len1);
+ Assert.assertArrayEquals(readData, expectedData);
+
+ // Pos: 1/3 dataLen
+ byteBufferReadFullyCheck(in, dataLen / 3);
+
+ // Read len1 bytes
+ readData = new byte[len1];
+ readAll(in, readData, 0, len1);
+ expectedData = new byte[len1];
+ System.arraycopy(data, len1, expectedData, 0, len1);
+ Assert.assertArrayEquals(readData, expectedData);
+
+ // Pos: 1/2 dataLen
+ byteBufferReadFullyCheck(in, dataLen / 2);
+
+ // Read len1 bytes
+ readData = new byte[len1];
+ readAll(in, readData, 0, len1);
+ expectedData = new byte[len1];
+ System.arraycopy(data, 2 * len1, expectedData, 0, len1);
+ Assert.assertArrayEquals(readData, expectedData);
+ }
+ }
+
+ private void byteBufferReadFullyCheck(InputStream in, int pos)
+ throws Exception {
+ ByteBuffer result = ByteBuffer.allocate(dataLen - pos);
+ ((ByteBufferPositionedReadable) in).readFully(pos, result);
+
+ byte[] expectedData = new byte[dataLen - pos];
+ System.arraycopy(data, pos, expectedData, 0, dataLen - pos);
+ Assert.assertArrayEquals(result.array(), expectedData);
+
+ result = ByteBuffer.allocate(dataLen); // Exceeds maximum length
+ try {
+ ((ByteBufferPositionedReadable) in).readFully(pos, result);
+      Assert.fail("readFully past the end of the file should fail.");
+ } catch (EOFException e) {
+ }
+ }
/** Test seek to different position. */
@Test(timeout=120000)
@@ -505,12 +631,40 @@ private void byteBufferReadCheck(InputStream in, ByteBuffer buf,
System.arraycopy(data, 0, expectedData, 0, n);
Assert.assertArrayEquals(readData, expectedData);
}
+
+ private void byteBufferPreadCheck(InputStream in, ByteBuffer buf,
+ int bufPos) throws Exception {
+ // Test reading from position 0
+ buf.position(bufPos);
+ int n = ((ByteBufferPositionedReadable) in).read(0, buf);
+ Assert.assertEquals(bufPos + n, buf.position());
+ byte[] readData = new byte[n];
+ buf.rewind();
+ buf.position(bufPos);
+ buf.get(readData);
+ byte[] expectedData = new byte[n];
+ System.arraycopy(data, 0, expectedData, 0, n);
+ Assert.assertArrayEquals(readData, expectedData);
+
+ // Test reading from half way through the data
+ buf.position(bufPos);
+ n = ((ByteBufferPositionedReadable) in).read(dataLen / 2, buf);
+ Assert.assertEquals(bufPos + n, buf.position());
+ readData = new byte[n];
+ buf.rewind();
+ buf.position(bufPos);
+ buf.get(readData);
+ expectedData = new byte[n];
+ System.arraycopy(data, dataLen / 2, expectedData, 0, n);
+ Assert.assertArrayEquals(readData, expectedData);
+ }
/** Test byte buffer read with different buffer size. */
@Test(timeout=120000)
public void testByteBufferRead() throws Exception {
- OutputStream out = getOutputStream(defaultBufferSize);
- writeData(out);
+ try (OutputStream out = getOutputStream(defaultBufferSize)) {
+ writeData(out);
+ }
// Default buffer size, initial buffer position is 0
InputStream in = getInputStream(defaultBufferSize);
@@ -560,6 +714,53 @@ public void testByteBufferRead() throws Exception {
byteBufferReadCheck(in, buf, 11);
in.close();
}
+
+ /** Test byte buffer pread with different buffer size. */
+ @Test(timeout=120000)
+ public void testByteBufferPread() throws Exception {
+ try (OutputStream out = getOutputStream(defaultBufferSize)) {
+ writeData(out);
+ }
+
+ try (InputStream defaultBuf = getInputStream(defaultBufferSize);
+ InputStream smallBuf = getInputStream(smallBufferSize)) {
+
+ ByteBuffer buf = ByteBuffer.allocate(dataLen + 100);
+
+ // Default buffer size, initial buffer position is 0
+ byteBufferPreadCheck(defaultBuf, buf, 0);
+
+ // Default buffer size, initial buffer position is not 0
+ buf.clear();
+ byteBufferPreadCheck(defaultBuf, buf, 11);
+
+ // Small buffer size, initial buffer position is 0
+ buf.clear();
+ byteBufferPreadCheck(smallBuf, buf, 0);
+
+ // Small buffer size, initial buffer position is not 0
+ buf.clear();
+ byteBufferPreadCheck(smallBuf, buf, 11);
+
+ // Test with direct ByteBuffer
+ buf = ByteBuffer.allocateDirect(dataLen + 100);
+
+ // Direct buffer, default buffer size, initial buffer position is 0
+ byteBufferPreadCheck(defaultBuf, buf, 0);
+
+ // Direct buffer, default buffer size, initial buffer position is not 0
+ buf.clear();
+ byteBufferPreadCheck(defaultBuf, buf, 11);
+
+ // Direct buffer, small buffer size, initial buffer position is 0
+ buf.clear();
+ byteBufferPreadCheck(smallBuf, buf, 0);
+
+ // Direct buffer, small buffer size, initial buffer position is not 0
+ buf.clear();
+ byteBufferPreadCheck(smallBuf, buf, 11);
+ }
+ }
@Test(timeout=120000)
public void testCombinedOp() throws Exception {
@@ -797,5 +998,23 @@ public void testUnbuffer() throws Exception {
// The close will be called when exiting this try-with-resource block
}
}
+
+ // Test ByteBuffer pread
+ try (InputStream in = getInputStream(smallBufferSize)) {
+ if (in instanceof ByteBufferPositionedReadable) {
+ ByteBufferPositionedReadable bbpin = (ByteBufferPositionedReadable) in;
+
+ // Test unbuffer after pread
+ byteBufferPreadCheck(bbpin);
+ ((CanUnbuffer) in).unbuffer();
+
+ // Test pread again after unbuffer
+ byteBufferPreadCheck(bbpin);
+
+ // Test close after unbuffer
+ ((CanUnbuffer) in).unbuffer();
+ // The close will be called when exiting this try-with-resource block
+ }
+ }
}
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreams.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreams.java
index cd7391a02c38f..73c6249612387 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreams.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreams.java
@@ -26,6 +26,7 @@
import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ByteBufferPositionedReadable;
import org.apache.hadoop.fs.ByteBufferReadable;
import org.apache.hadoop.fs.CanSetDropBehind;
import org.apache.hadoop.fs.CanSetReadahead;
@@ -180,7 +181,7 @@ static class FakeInputStream extends InputStream
implements Seekable, PositionedReadable, ByteBufferReadable,
HasFileDescriptor, CanSetDropBehind, CanSetReadahead,
HasEnhancedByteBufferAccess, CanUnbuffer,
- StreamCapabilities {
+ StreamCapabilities, ByteBufferPositionedReadable {
private final byte[] oneByteBuf = new byte[1];
private int pos = 0;
private final byte[] data;
@@ -303,6 +304,56 @@ public int read(long position, byte[] b, int off, int len)
return -1;
}
+ @Override
+ public int read(long position, ByteBuffer buf) throws IOException {
+ if (buf == null) {
+ throw new NullPointerException();
+ } else if (!buf.hasRemaining()) {
+ return 0;
+ }
+
+ if (position > length) {
+ throw new IOException("Cannot read after EOF.");
+ }
+ if (position < 0) {
+ throw new IOException("Cannot read to negative offset.");
+ }
+
+ checkStream();
+
+ if (position < length) {
+ int n = (int) Math.min(buf.remaining(), length - position);
+ buf.put(data, (int) position, n);
+ return n;
+ }
+
+ return -1;
+ }
+
+ @Override
+ public void readFully(long position, ByteBuffer buf) throws IOException {
+ if (buf == null) {
+ throw new NullPointerException();
+ } else if (!buf.hasRemaining()) {
+ return;
+ }
+
+ if (position > length) {
+ throw new IOException("Cannot read after EOF.");
+ }
+ if (position < 0) {
+ throw new IOException("Cannot read to negative offset.");
+ }
+
+ checkStream();
+
+ if (position + buf.remaining() > length) {
+      throw new EOFException("Reached the end of stream.");
+ }
+
+ buf.put(data, (int) position, buf.remaining());
+ }
+
@Override
public void readFully(long position, byte[] b, int off, int len)
throws IOException {
@@ -378,6 +429,8 @@ public boolean hasCapability(String capability) {
case StreamCapabilities.READAHEAD:
case StreamCapabilities.DROPBEHIND:
case StreamCapabilities.UNBUFFER:
+ case StreamCapabilities.READBYTEBUFFER:
+ case StreamCapabilities.PREADBYTEBUFFER:
return true;
default:
return false;
@@ -439,7 +492,9 @@ public void testHasCapability() throws Exception {
new String[] {
StreamCapabilities.DROPBEHIND,
StreamCapabilities.READAHEAD,
- StreamCapabilities.UNBUFFER
+ StreamCapabilities.UNBUFFER,
+ StreamCapabilities.READBYTEBUFFER,
+ StreamCapabilities.PREADBYTEBUFFER
},
new String[] {
StreamCapabilities.HFLUSH,
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsForLocalFS.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsForLocalFS.java
index bb3fd7a68d722..e7d922e78a64e 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsForLocalFS.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsForLocalFS.java
@@ -90,11 +90,21 @@ protected InputStream getInputStream(int bufferSize, byte[] key, byte[] iv)
@Override
@Test(timeout=10000)
public void testByteBufferRead() throws Exception {}
+
+ @Ignore("Wrapped stream doesn't support ByteBufferPositionedReadable")
+ @Override
+ @Test(timeout=10000)
+ public void testPositionedReadWithByteBuffer() throws IOException {}
@Ignore("ChecksumFSOutputSummer doesn't support Syncable")
@Override
@Test(timeout=10000)
public void testSyncable() throws IOException {}
+
+ @Ignore("Wrapped stream doesn't support ByteBufferPositionedReadable")
+ @Override
+ @Test(timeout=10000)
+ public void testByteBufferPread() throws IOException {}
@Ignore("ChecksumFSInputChecker doesn't support ByteBuffer read")
@Override
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsNormal.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsNormal.java
index 7e300777a37a1..036706f435a60 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsNormal.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsNormal.java
@@ -91,6 +91,11 @@ public void testSyncable() throws IOException {}
@Test(timeout=10000)
public void testPositionedRead() throws IOException {}
+ @Ignore("Wrapped stream doesn't support ByteBufferPositionedReadable")
+ @Override
+ @Test(timeout=10000)
+ public void testPositionedReadWithByteBuffer() throws IOException {}
+
@Ignore("Wrapped stream doesn't support ReadFully")
@Override
@Test(timeout=10000)
@@ -105,6 +110,11 @@ public void testSeek() throws IOException {}
@Override
@Test(timeout=10000)
public void testByteBufferRead() throws IOException {}
+
+ @Ignore("Wrapped stream doesn't support ByteBufferPositionedReadable")
+ @Override
+ @Test(timeout=10000)
+ public void testByteBufferPread() throws IOException {}
@Ignore("Wrapped stream doesn't support ByteBufferRead, Seek")
@Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index a4bf4542d04d4..f444d677368ea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -47,11 +47,13 @@
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.ByteBufferReadable;
+import org.apache.hadoop.fs.ByteBufferPositionedReadable;
import org.apache.hadoop.fs.ByteBufferUtil;
import org.apache.hadoop.fs.CanSetDropBehind;
import org.apache.hadoop.fs.CanSetReadahead;
import org.apache.hadoop.fs.CanUnbuffer;
import org.apache.hadoop.fs.ChecksumException;
+import org.apache.hadoop.fs.FSExceptionMessages;
import org.apache.hadoop.fs.FSInputStream;
import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.fs.HasEnhancedByteBufferAccess;
@@ -100,7 +102,8 @@
@InterfaceAudience.Private
public class DFSInputStream extends FSInputStream
implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
- HasEnhancedByteBufferAccess, CanUnbuffer, StreamCapabilities {
+ HasEnhancedByteBufferAccess, CanUnbuffer, StreamCapabilities,
+ ByteBufferPositionedReadable {
@VisibleForTesting
public static boolean tcpReadsDisabledForTesting = false;
private long hedgedReadOpsLoopNumForTesting = 0;
@@ -1653,6 +1656,27 @@ public void reset() throws IOException {
throw new IOException("Mark/reset not supported");
}
+ @Override
+ public int read(long position, final ByteBuffer buf) throws IOException {
+ if (!buf.hasRemaining()) {
+ return 0;
+ }
+ return pread(position, buf);
+ }
+
+ @Override
+ public void readFully(long position, final ByteBuffer buf)
+ throws IOException {
+ int nread = 0;
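+    // Keep issuing positioned reads until the buffer is full; each call
+    // advances buf.position(), and a negative return value means EOF was
+    // reached before the buffer could be filled.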
+ while (buf.hasRemaining()) {
+ int nbytes = read(position + nread, buf);
+ if (nbytes < 0) {
+ throw new EOFException(FSExceptionMessages.EOF_IN_READ_FULLY);
+ }
+ nread += nbytes;
+ }
+ }
+
/** Utility class to encapsulate data node info and its address. */
static final class DNAddrPair {
final DatanodeInfo info;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt
index 18396c7855477..24ec297aa27b8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt
@@ -148,12 +148,12 @@ endif()
add_subdirectory(main/native/libhdfs)
add_subdirectory(main/native/libhdfs-tests)
+add_subdirectory(main/native/libhdfs-examples)
# Temporary fix to disable Libhdfs++ build on older systems that do not support thread_local
include(CheckCXXSourceCompiles)
unset (THREAD_LOCAL_SUPPORTED CACHE)
-set (CMAKE_CXX_STANDARD 11)
-set (CMAKE_CXX_STANDARD_REQUIRED ON)
+set (CMAKE_REQUIRED_DEFINITIONS "-std=c++11")
set (CMAKE_REQUIRED_LIBRARIES ${CMAKE_THREAD_LIBS_INIT})
check_cxx_source_compiles(
"#include
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-examples/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-examples/CMakeLists.txt
new file mode 100644
index 0000000000000..1d33639f3db68
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-examples/CMakeLists.txt
@@ -0,0 +1,34 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+cmake_minimum_required(VERSION 3.1 FATAL_ERROR)
+
+include_directories(
+ ${CMAKE_CURRENT_SOURCE_DIR}/../libhdfs/include
+ ${GENERATED_JAVAH}
+ ${CMAKE_BINARY_DIR}
+ ${CMAKE_CURRENT_SOURCE_DIR}/../libhdfs
+ ${JNI_INCLUDE_DIRS}
+ ${OS_DIR}
+)
+
+add_executable(hdfs_read libhdfs_read.c)
+target_link_libraries(hdfs_read hdfs)
+
+add_executable(hdfs_write libhdfs_write.c)
+target_link_libraries(hdfs_write hdfs)
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-examples/README.md b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-examples/README.md
new file mode 100644
index 0000000000000..c962feba526c7
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-examples/README.md
@@ -0,0 +1,24 @@
+
+
+The files in this directory are purely meant to provide additional examples of how to use libhdfs. They are compiled as
+part of the build and are thus guaranteed to compile against the associated version of libhdfs. However, no tests exist
+for these examples, so their functionality is not guaranteed.
+
+The examples are written to run against a mini-dfs cluster. The script `test-libhdfs.sh` can set up a mini DFS cluster
+that the examples can run against. Again, none of this is tested and is thus not guaranteed to work.
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_read.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-examples/libhdfs_read.c
similarity index 91%
rename from hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_read.c
rename to hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-examples/libhdfs_read.c
index 4b90f2a4ab0be..419be1268b284 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_read.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-examples/libhdfs_read.c
@@ -16,11 +16,16 @@
* limitations under the License.
*/
-#include "hdfs/hdfs.h"
+#include "hdfs/hdfs.h"
#include <stdio.h>
#include <stdlib.h>
+/**
+ * An example of using libhdfs to read files. The usage of this file is as follows:
+ *
+ * Usage: hdfs_read
+ */
int main(int argc, char **argv) {
hdfsFS fs;
const char *rfile = argv[1];
@@ -33,12 +38,12 @@ int main(int argc, char **argv) {
fprintf(stderr, "Usage: hdfs_read \n");
exit(-1);
}
-
+
fs = hdfsConnect("default", 0);
if (!fs) {
fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
exit(-1);
- }
+ }
readFile = hdfsOpenFile(fs, rfile, O_RDONLY, bufferSize, 0, 0);
if (!readFile) {
@@ -51,13 +56,13 @@ int main(int argc, char **argv) {
if(buffer == NULL) {
return -2;
}
-
+
// read from the file
curSize = bufferSize;
for (; curSize == bufferSize;) {
curSize = hdfsRead(fs, readFile, (void*)buffer, curSize);
}
-
+
free(buffer);
hdfsCloseFile(fs, readFile);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_write.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-examples/libhdfs_write.c
similarity index 93%
rename from hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_write.c
rename to hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-examples/libhdfs_write.c
index c55c8e330c33b..8fbf87e524439 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_write.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-examples/libhdfs_write.c
@@ -16,13 +16,18 @@
* limitations under the License.
*/
-#include "hdfs/hdfs.h"
+#include "hdfs/hdfs.h"
#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
+/**
+ * An example of using libhdfs to write files. The usage of this file is as follows:
+ *
+ * Usage: hdfs_write
+ */
int main(int argc, char **argv) {
hdfsFS fs;
const char *writeFileName = argv[1];
@@ -40,12 +45,12 @@ int main(int argc, char **argv) {
fprintf(stderr, "Usage: hdfs_write \n");
exit(-1);
}
-
+
fs = hdfsConnect("default", 0);
if (!fs) {
fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
exit(-1);
- }
+ }
// sanity check
if(fileTotalSize == ULONG_MAX && errno == ERANGE) {
@@ -79,7 +84,7 @@ int main(int argc, char **argv) {
// write to the file
for (nrRemaining = fileTotalSize; nrRemaining > 0; nrRemaining -= bufferSize ) {
- curSize = ( bufferSize < nrRemaining ) ? bufferSize : (tSize)nrRemaining;
+ curSize = ( bufferSize < nrRemaining ) ? bufferSize : (tSize)nrRemaining;
if ((written = hdfsWrite(fs, writeFile, (void*)buffer, curSize)) != curSize) {
fprintf(stderr, "ERROR: hdfsWrite returned an error on write: %d\n", written);
exit(-3);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/tests/test-libhdfs.sh b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-examples/test-libhdfs.sh
similarity index 98%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/native/tests/test-libhdfs.sh
rename to hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-examples/test-libhdfs.sh
index 3407e9cf8e26a..e43b0a52903dd 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/tests/test-libhdfs.sh
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-examples/test-libhdfs.sh
@@ -70,7 +70,7 @@ $HADOOP_HOME/share/hadoop/common/
$HADOOP_HOME/share/hadoop/hdfs
$HADOOP_HOME/share/hadoop/hdfs/lib/"
-for d in $JAR_DIRS; do
+for d in $JAR_DIRS; do
for j in $d/*.jar; do
CLASSPATH=${CLASSPATH}:$j
done;
@@ -114,14 +114,14 @@ LIB_JVM_DIR=`findlibjvm`
echo "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
echo LIB_JVM_DIR = $LIB_JVM_DIR
echo "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
-# Put delays to ensure hdfs is up and running and also shuts down
+# Put delays to ensure hdfs is up and running and also shuts down
# after the tests are complete
rm $HDFS_TEST_CONF_DIR/core-site.xml
$HADOOP_HOME/bin/hadoop jar $HDFS_TEST_JAR \
org.apache.hadoop.test.MiniDFSClusterManager \
-format -nnport 20300 -writeConfig $HDFS_TEST_CONF_DIR/core-site.xml \
- > /tmp/libhdfs-test-cluster.out 2>&1 &
+ > /tmp/libhdfs-test-cluster.out 2>&1 &
MINI_CLUSTER_PID=$!
for i in {1..15}; do
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/CMakeLists.txt
index 08fc030bbbbcc..f16cc9eb1b033 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/CMakeLists.txt
@@ -29,8 +29,8 @@ include_directories(
add_library(native_mini_dfs
native_mini_dfs.c
- ../libhdfs/common/htable.c
../libhdfs/exception.c
+ ../libhdfs/jclasses.c
../libhdfs/jni_helper.c
${OS_DIR}/mutexes.c
${OS_DIR}/thread_local_storage.c
@@ -39,6 +39,3 @@ add_library(native_mini_dfs
add_executable(test_native_mini_dfs test_native_mini_dfs.c)
target_link_libraries(test_native_mini_dfs native_mini_dfs ${JAVA_JVM_LIBRARY})
add_test(test_test_native_mini_dfs test_native_mini_dfs)
-
-add_executable(test_htable ../libhdfs/common/htable.c test_htable.c)
-target_link_libraries(test_htable ${OS_LINK_LIBRARIES})
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/hdfs_test.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/hdfs_test.h
index 0eab9a68aea7f..f00326317f24a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/hdfs_test.h
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/hdfs_test.h
@@ -49,6 +49,24 @@ extern "C" {
*/
void hdfsFileDisableDirectRead(struct hdfsFile_internal *file);
+ /**
+ * Determine if a file is using the "direct pread" optimization.
+ *
+ * @param file The HDFS file
+ * @return 1 if the file is using the direct pread optimization,
+ * 0 otherwise.
+ */
+ int hdfsFileUsesDirectPread(struct hdfsFile_internal *file);
+
+ /**
+ * Disable the direct pread optimization for a file.
+ *
+ * This is mainly provided for unit testing purposes.
+ *
+ * @param file The HDFS file
+ */
+ void hdfsFileDisableDirectPread(struct hdfsFile_internal *file);
+
/**
* Disable domain socket security checks.
*
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/native_mini_dfs.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/native_mini_dfs.c
index 6938109d53e4d..a69c6efe0c763 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/native_mini_dfs.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/native_mini_dfs.c
@@ -17,6 +17,7 @@
*/
#include "exception.h"
+#include "jclasses.h"
#include "jni_helper.h"
#include "native_mini_dfs.h"
#include "platform.h"
@@ -36,9 +37,7 @@
#define MINIDFS_CLUSTER_BUILDER "org/apache/hadoop/hdfs/MiniDFSCluster$Builder"
#define MINIDFS_CLUSTER "org/apache/hadoop/hdfs/MiniDFSCluster"
-#define HADOOP_CONF "org/apache/hadoop/conf/Configuration"
#define HADOOP_NAMENODE "org/apache/hadoop/hdfs/server/namenode/NameNode"
-#define JAVA_INETSOCKETADDRESS "java/net/InetSocketAddress"
struct NativeMiniDfsCluster {
/**
@@ -60,8 +59,7 @@ static int hdfsDisableDomainSocketSecurity(void)
errno = EINTERNAL;
return -1;
}
- jthr = invokeMethod(env, NULL, STATIC, NULL,
- "org/apache/hadoop/net/unix/DomainSocket",
+ jthr = invokeMethod(env, NULL, STATIC, NULL, JC_DOMAIN_SOCKET,
"disableBindPathValidation", "()V");
if (jthr) {
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -126,11 +124,6 @@ struct NativeMiniDfsCluster* nmdCreate(struct NativeMiniDfsConf *conf)
"nmdCreate: new Configuration");
goto error;
}
- if (jthr) {
- printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
- "nmdCreate: Configuration::setBoolean");
- goto error;
- }
// Disable 'minimum block size' -- it's annoying in tests.
(*env)->DeleteLocalRef(env, jconfStr);
jconfStr = NULL;
@@ -140,8 +133,9 @@ struct NativeMiniDfsCluster* nmdCreate(struct NativeMiniDfsConf *conf)
"nmdCreate: new String");
goto error;
}
- jthr = invokeMethod(env, NULL, INSTANCE, cobj, HADOOP_CONF,
- "setLong", "(Ljava/lang/String;J)V", jconfStr, 0LL);
+ jthr = invokeMethod(env, NULL, INSTANCE, cobj,
+ JC_CONFIGURATION, "setLong", "(Ljava/lang/String;J)V", jconfStr,
+ 0LL);
if (jthr) {
printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"nmdCreate: Configuration::setLong");
@@ -163,7 +157,7 @@ struct NativeMiniDfsCluster* nmdCreate(struct NativeMiniDfsConf *conf)
goto error;
}
}
- jthr = invokeMethod(env, &val, INSTANCE, bld, MINIDFS_CLUSTER_BUILDER,
+ jthr = findClassAndInvokeMethod(env, &val, INSTANCE, bld, MINIDFS_CLUSTER_BUILDER,
"format", "(Z)L" MINIDFS_CLUSTER_BUILDER ";", conf->doFormat);
if (jthr) {
printExceptionAndFree(env, jthr, PRINT_EXC_ALL, "nmdCreate: "
@@ -172,7 +166,7 @@ struct NativeMiniDfsCluster* nmdCreate(struct NativeMiniDfsConf *conf)
}
(*env)->DeleteLocalRef(env, val.l);
if (conf->webhdfsEnabled) {
- jthr = invokeMethod(env, &val, INSTANCE, bld, MINIDFS_CLUSTER_BUILDER,
+ jthr = findClassAndInvokeMethod(env, &val, INSTANCE, bld, MINIDFS_CLUSTER_BUILDER,
"nameNodeHttpPort", "(I)L" MINIDFS_CLUSTER_BUILDER ";",
conf->namenodeHttpPort);
if (jthr) {
@@ -183,16 +177,16 @@ struct NativeMiniDfsCluster* nmdCreate(struct NativeMiniDfsConf *conf)
(*env)->DeleteLocalRef(env, val.l);
}
if (conf->numDataNodes) {
- jthr = invokeMethod(env, &val, INSTANCE, bld, MINIDFS_CLUSTER_BUILDER,
+ jthr = findClassAndInvokeMethod(env, &val, INSTANCE, bld, MINIDFS_CLUSTER_BUILDER,
"numDataNodes", "(I)L" MINIDFS_CLUSTER_BUILDER ";", conf->numDataNodes);
if (jthr) {
printExceptionAndFree(env, jthr, PRINT_EXC_ALL, "nmdCreate: "
"Builder::numDataNodes");
goto error;
}
+ (*env)->DeleteLocalRef(env, val.l);
}
- (*env)->DeleteLocalRef(env, val.l);
- jthr = invokeMethod(env, &val, INSTANCE, bld, MINIDFS_CLUSTER_BUILDER,
+ jthr = findClassAndInvokeMethod(env, &val, INSTANCE, bld, MINIDFS_CLUSTER_BUILDER,
"build", "()L" MINIDFS_CLUSTER ";");
if (jthr) {
printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -242,7 +236,7 @@ int nmdShutdown(struct NativeMiniDfsCluster* cl)
fprintf(stderr, "nmdShutdown: getJNIEnv failed\n");
return -EIO;
}
- jthr = invokeMethod(env, NULL, INSTANCE, cl->obj,
+ jthr = findClassAndInvokeMethod(env, NULL, INSTANCE, cl->obj,
MINIDFS_CLUSTER, "shutdown", "()V");
if (jthr) {
printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -260,7 +254,7 @@ int nmdWaitClusterUp(struct NativeMiniDfsCluster *cl)
fprintf(stderr, "nmdWaitClusterUp: getJNIEnv failed\n");
return -EIO;
}
- jthr = invokeMethod(env, NULL, INSTANCE, cl->obj,
+ jthr = findClassAndInvokeMethod(env, NULL, INSTANCE, cl->obj,
MINIDFS_CLUSTER, "waitClusterUp", "()V");
if (jthr) {
printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -282,7 +276,7 @@ int nmdGetNameNodePort(const struct NativeMiniDfsCluster *cl)
}
// Note: this will have to be updated when HA nativeMiniDfs clusters are
// supported
- jthr = invokeMethod(env, &jVal, INSTANCE, cl->obj,
+ jthr = findClassAndInvokeMethod(env, &jVal, INSTANCE, cl->obj,
MINIDFS_CLUSTER, "getNameNodePort", "()I");
if (jthr) {
printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -307,7 +301,7 @@ int nmdGetNameNodeHttpAddress(const struct NativeMiniDfsCluster *cl,
return -EIO;
}
// First get the (first) NameNode of the cluster
- jthr = invokeMethod(env, &jVal, INSTANCE, cl->obj, MINIDFS_CLUSTER,
+ jthr = findClassAndInvokeMethod(env, &jVal, INSTANCE, cl->obj, MINIDFS_CLUSTER,
"getNameNode", "()L" HADOOP_NAMENODE ";");
if (jthr) {
printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -318,8 +312,8 @@ int nmdGetNameNodeHttpAddress(const struct NativeMiniDfsCluster *cl,
jNameNode = jVal.l;
// Then get the http address (InetSocketAddress) of the NameNode
- jthr = invokeMethod(env, &jVal, INSTANCE, jNameNode, HADOOP_NAMENODE,
- "getHttpAddress", "()L" JAVA_INETSOCKETADDRESS ";");
+ jthr = findClassAndInvokeMethod(env, &jVal, INSTANCE, jNameNode, HADOOP_NAMENODE,
+ "getHttpAddress", "()L" JAVA_NET_ISA ";");
if (jthr) {
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"nmdGetNameNodeHttpAddress: "
@@ -328,8 +322,8 @@ int nmdGetNameNodeHttpAddress(const struct NativeMiniDfsCluster *cl,
}
jAddress = jVal.l;
- jthr = invokeMethod(env, &jVal, INSTANCE, jAddress,
- JAVA_INETSOCKETADDRESS, "getPort", "()I");
+ jthr = findClassAndInvokeMethod(env, &jVal, INSTANCE, jAddress,
+ JAVA_NET_ISA, "getPort", "()I");
if (jthr) {
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"nmdGetNameNodeHttpAddress: "
@@ -338,7 +332,7 @@ int nmdGetNameNodeHttpAddress(const struct NativeMiniDfsCluster *cl,
}
*port = jVal.i;
- jthr = invokeMethod(env, &jVal, INSTANCE, jAddress, JAVA_INETSOCKETADDRESS,
+ jthr = findClassAndInvokeMethod(env, &jVal, INSTANCE, jAddress, JAVA_NET_ISA,
"getHostName", "()Ljava/lang/String;");
if (jthr) {
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
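
Sketch (not part of the patch, for review context only): the substitutions above alternate between two JNI helpers. Their shapes, inferred purely from the call sites in this diff rather than copied from the headers (the authoritative declarations live in jni_helper.h and jclasses.h), look roughly like:

    /* Inferred prototypes; MethType and CachedJavaClass are the enums used
     * elsewhere in this patch. invokeMethod resolves its target class through
     * the new cache, while findClassAndInvokeMethod keeps the by-name lookup
     * for classes that are not cached, such as the MiniDFSCluster builder. */
    jthrowable invokeMethod(JNIEnv *env, jvalue *retval, MethType methType,
            jobject instObj, CachedJavaClass cachedClass,
            const char *methName, const char *methSignature, ...);
    jthrowable findClassAndInvokeMethod(JNIEnv *env, jvalue *retval,
            MethType methType, jobject instObj, const char *className,
            const char *methName, const char *methSignature, ...);
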
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_htable.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_htable.c
deleted file mode 100644
index 0c3861bfa7f9a..0000000000000
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_htable.c
+++ /dev/null
@@ -1,100 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "common/htable.h"
-#include "expect.h"
-#include "hdfs_test.h"
-
-#include
-#include
-#include
-#include
-#include
-
-// Disable type cast and loss of precision warnings, because the test
-// manipulates void* values manually on purpose.
-#ifdef WIN32
-#pragma warning(disable: 4244 4306)
-#endif
-
-static uint32_t simple_hash(const void *key, uint32_t size)
-{
- uintptr_t k = (uintptr_t)key;
- return ((13 + k) * 6367) % size;
-}
-
-static int simple_compare(const void *a, const void *b)
-{
- return a == b;
-}
-
-static void expect_102(void *f, void *k, void *v)
-{
- int *found_102 = f;
- uintptr_t key = (uintptr_t)k;
- uintptr_t val = (uintptr_t)v;
-
- if ((key == 2) && (val == 102)) {
- *found_102 = 1;
- } else {
- abort();
- }
-}
-
-static void *htable_pop_val(struct htable *ht, void *key)
-{
- void *old_key, *old_val;
-
- htable_pop(ht, key, &old_key, &old_val);
- return old_val;
-}
-
-int main(void)
-{
- struct htable *ht;
- int found_102 = 0;
-
- ht = htable_alloc(4, simple_hash, simple_compare);
- EXPECT_INT_EQ(0, htable_used(ht));
- EXPECT_INT_EQ(4, htable_capacity(ht));
- EXPECT_NULL(htable_get(ht, (void*)123));
- EXPECT_NULL(htable_pop_val(ht, (void*)123));
- EXPECT_ZERO(htable_put(ht, (void*)123, (void*)456));
- EXPECT_INT_EQ(456, (uintptr_t)htable_get(ht, (void*)123));
- EXPECT_INT_EQ(456, (uintptr_t)htable_pop_val(ht, (void*)123));
- EXPECT_NULL(htable_pop_val(ht, (void*)123));
-
- // Enlarge the hash table
- EXPECT_ZERO(htable_put(ht, (void*)1, (void*)101));
- EXPECT_ZERO(htable_put(ht, (void*)2, (void*)102));
- EXPECT_ZERO(htable_put(ht, (void*)3, (void*)103));
- EXPECT_INT_EQ(3, htable_used(ht));
- EXPECT_INT_EQ(8, htable_capacity(ht));
- EXPECT_INT_EQ(102, (uintptr_t)htable_get(ht, (void*)2));
- EXPECT_INT_EQ(101, (uintptr_t)htable_pop_val(ht, (void*)1));
- EXPECT_INT_EQ(103, (uintptr_t)htable_pop_val(ht, (void*)3));
- EXPECT_INT_EQ(1, htable_used(ht));
- htable_visit(ht, expect_102, &found_102);
- EXPECT_INT_EQ(1, found_102);
- htable_free(ht);
-
- fprintf(stderr, "SUCCESS.\n");
- return EXIT_SUCCESS;
-}
-
-// vim: ts=4:sw=4:tw=79:et
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_ops.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_ops.c
index d69aa37794848..ebf0dd7e1e33e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_ops.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_ops.c
@@ -16,8 +16,10 @@
* limitations under the License.
*/
-#include "hdfs/hdfs.h"
-#include "hdfs_test.h"
+#include "expect.h"
+#include "hdfs/hdfs.h"
+#include "hdfs_test.h"
+#include "native_mini_dfs.h"
#include "platform.h"
#include
@@ -59,7 +61,18 @@ void permission_disp(short permissions, char *rtr) {
strncpy(rtr, perm, 3);
rtr+=3;
}
-}
+}
+
+/**
+ * Shut down and free the given mini cluster, then exit with the provided exit_code. This helper is meant to be
+ * called with a non-zero exit code, so the return status of MiniDFSCluster#shutdown is deliberately ignored:
+ * the process is going to fail anyway.
+ */
+void shutdown_and_exit(struct NativeMiniDfsCluster* cl, int exit_code) {
+ nmdShutdown(cl);
+ nmdFree(cl);
+ exit(exit_code);
+}
int main(int argc, char **argv) {
const char *writePath = "/tmp/testfile.txt";
@@ -75,9 +88,9 @@ int main(int argc, char **argv) {
const char *userPath = "/tmp/usertestfile.txt";
char buffer[32], buffer2[256], rdbuffer[32];
- tSize num_written_bytes, num_read_bytes;
+ tSize num_written_bytes, num_read_bytes, num_pread_bytes;
hdfsFS fs, lfs;
- hdfsFile writeFile, readFile, localFile, appendFile, userFile;
+ hdfsFile writeFile, readFile, preadFile, localFile, appendFile, userFile;
tOffset currentPos, seekPos;
int exists, totalResult, result, numEntries, i, j;
const char *resp;
@@ -88,25 +101,56 @@ int main(int argc, char **argv) {
short newPerm = 0666;
tTime newMtime, newAtime;
- fs = hdfsConnectNewInstance("default", 0);
+ // Create and start the mini cluster
+ struct NativeMiniDfsCluster* cl;
+ struct NativeMiniDfsConf conf = {
+ 1, /* doFormat */
+ };
+
+ cl = nmdCreate(&conf);
+ EXPECT_NONNULL(cl);
+ EXPECT_ZERO(nmdWaitClusterUp(cl));
+ tPort port;
+ port = (tPort) nmdGetNameNodePort(cl);
+
+ // Create a hdfs connection to the mini cluster
+ struct hdfsBuilder *bld;
+ bld = hdfsNewBuilder();
+ EXPECT_NONNULL(bld);
+
+ hdfsBuilderSetForceNewInstance(bld);
+ hdfsBuilderSetNameNode(bld, "localhost");
+ hdfsBuilderSetNameNodePort(bld, port);
+ // When run against a mini DFS cluster, the HDFS append tests (see, for example,
+ // TestFileAppend#testMultipleAppends) require this property to be disabled, otherwise they fail with:
+ //
+ //   IOException: Failed to replace a bad datanode on the existing pipeline due to no more good datanodes being
+ //   available to try. The current failed datanode replacement policy is DEFAULT, and a client may configure this
+ //   via 'dfs.client.block.write.replace-datanode-on-failure.policy' in its configuration.
+ hdfsBuilderConfSetStr(bld, "dfs.client.block.write.replace-datanode-on-failure.enable", "false");
+
+ fs = hdfsBuilderConnect(bld);
+
if(!fs) {
fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
- exit(-1);
+ shutdown_and_exit(cl, -1);
}
lfs = hdfsConnectNewInstance(NULL, 0);
if(!lfs) {
fprintf(stderr, "Oops! Failed to connect to 'local' hdfs!\n");
- exit(-1);
+ shutdown_and_exit(cl, -1);
}
{
- //Write tests
+ // Write tests
writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
if(!writeFile) {
fprintf(stderr, "Failed to open %s for writing!\n", writePath);
- exit(-1);
+ shutdown_and_exit(cl, -1);
}
fprintf(stderr, "Opened %s for writing successfully...\n", writePath);
num_written_bytes =
@@ -115,7 +159,7 @@ int main(int argc, char **argv) {
if (num_written_bytes != strlen(fileContents) + 1) {
fprintf(stderr, "Failed to write correct number of bytes - expected %d, got %d\n",
(int)(strlen(fileContents) + 1), (int)num_written_bytes);
- exit(-1);
+ shutdown_and_exit(cl, -1);
}
fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
@@ -124,19 +168,19 @@ int main(int argc, char **argv) {
fprintf(stderr,
"Failed to get current file position correctly! Got %" PRId64 "!\n",
currentPos);
- exit(-1);
+ shutdown_and_exit(cl, -1);
}
fprintf(stderr, "Current position: %" PRId64 "\n", currentPos);
if (hdfsFlush(fs, writeFile)) {
fprintf(stderr, "Failed to 'flush' %s\n", writePath);
- exit(-1);
+ shutdown_and_exit(cl, -1);
}
fprintf(stderr, "Flushed %s successfully!\n", writePath);
if (hdfsHFlush(fs, writeFile)) {
fprintf(stderr, "Failed to 'hflush' %s\n", writePath);
- exit(-1);
+ shutdown_and_exit(cl, -1);
}
fprintf(stderr, "HFlushed %s successfully!\n", writePath);
@@ -144,26 +188,26 @@ int main(int argc, char **argv) {
}
{
- //Read tests
+ // Read tests
exists = hdfsExists(fs, readPath);
if (exists) {
fprintf(stderr, "Failed to validate existence of %s\n", readPath);
- exit(-1);
+ shutdown_and_exit(cl, -1);
}
readFile = hdfsOpenFile(fs, readPath, O_RDONLY, 0, 0, 0);
if (!readFile) {
fprintf(stderr, "Failed to open %s for reading!\n", readPath);
- exit(-1);
+ shutdown_and_exit(cl, -1);
}
if (!hdfsFileIsOpenForRead(readFile)) {
fprintf(stderr, "hdfsFileIsOpenForRead: we just opened a file "
"with O_RDONLY, and it did not show up as 'open for "
"read'\n");
- exit(-1);
+ shutdown_and_exit(cl, -1);
}
fprintf(stderr, "hdfsAvailable: %d\n", hdfsAvailable(fs, readFile));
@@ -171,7 +215,7 @@ int main(int argc, char **argv) {
seekPos = 1;
if(hdfsSeek(fs, readFile, seekPos)) {
fprintf(stderr, "Failed to seek %s for reading!\n", readPath);
- exit(-1);
+ shutdown_and_exit(cl, -1);
}
currentPos = -1;
@@ -179,14 +223,14 @@ int main(int argc, char **argv) {
fprintf(stderr,
"Failed to get current file position correctly! Got %" PRId64 "!\n",
currentPos);
- exit(-1);
+ shutdown_and_exit(cl, -1);
}
fprintf(stderr, "Current position: %" PRId64 "\n", currentPos);
if (!hdfsFileUsesDirectRead(readFile)) {
fprintf(stderr, "Direct read support incorrectly not detected "
"for HDFS filesystem\n");
- exit(-1);
+ shutdown_and_exit(cl, -1);
}
fprintf(stderr, "Direct read support detected for HDFS\n");
@@ -194,7 +238,7 @@ int main(int argc, char **argv) {
// Test the direct read path
if(hdfsSeek(fs, readFile, 0)) {
fprintf(stderr, "Failed to seek %s for reading!\n", readPath);
- exit(-1);
+ shutdown_and_exit(cl, -1);
}
memset(buffer, 0, sizeof(buffer));
num_read_bytes = hdfsRead(fs, readFile, (void*)buffer,
@@ -202,30 +246,41 @@ int main(int argc, char **argv) {
if (strncmp(fileContents, buffer, strlen(fileContents)) != 0) {
fprintf(stderr, "Failed to read (direct). Expected %s but got %s (%d bytes)\n",
fileContents, buffer, num_read_bytes);
- exit(-1);
+ shutdown_and_exit(cl, -1);
}
fprintf(stderr, "Read (direct) following %d bytes:\n%s\n",
num_read_bytes, buffer);
+ memset(buffer, 0, strlen(fileContents) + 1);
if (hdfsSeek(fs, readFile, 0L)) {
fprintf(stderr, "Failed to seek to file start!\n");
- exit(-1);
+ shutdown_and_exit(cl, -1);
}
// Disable the direct read path so that we really go through the slow
// read path
hdfsFileDisableDirectRead(readFile);
- num_read_bytes = hdfsRead(fs, readFile, (void*)buffer,
- sizeof(buffer));
- fprintf(stderr, "Read following %d bytes:\n%s\n",
- num_read_bytes, buffer);
+ if (hdfsFileUsesDirectRead(readFile)) {
+ fprintf(stderr, "Disabled direct reads, but it is still enabled\n");
+ shutdown_and_exit(cl, -1);
+ }
- memset(buffer, 0, strlen(fileContents + 1));
+ if (!hdfsFileUsesDirectPread(readFile)) {
+ fprintf(stderr, "Disabled direct reads, but direct pread was "
+ "disabled as well\n");
+ shutdown_and_exit(cl, -1);
+ }
- num_read_bytes = hdfsPread(fs, readFile, 0, (void*)buffer,
+ num_read_bytes = hdfsRead(fs, readFile, (void*)buffer,
sizeof(buffer));
- fprintf(stderr, "Read following %d bytes:\n%s\n",
+ if (strncmp(fileContents, buffer, strlen(fileContents)) != 0) {
+ fprintf(stderr, "Failed to read. Expected %s but got %s (%d bytes)\n",
+ fileContents, buffer, num_read_bytes);
+ shutdown_and_exit(cl, -1);
+ }
+ fprintf(stderr, "Read following %d bytes:\n%s\n",
num_read_bytes, buffer);
+ memset(buffer, 0, strlen(fileContents) + 1);
hdfsCloseFile(fs, readFile);
@@ -233,7 +288,7 @@ int main(int argc, char **argv) {
localFile = hdfsOpenFile(lfs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
if(!localFile) {
fprintf(stderr, "Failed to open %s for writing!\n", writePath);
- exit(-1);
+ shutdown_and_exit(cl, -1);
}
num_written_bytes = hdfsWrite(lfs, localFile, (void*)fileContents,
@@ -245,7 +300,116 @@ int main(int argc, char **argv) {
if (hdfsFileUsesDirectRead(localFile)) {
fprintf(stderr, "Direct read support incorrectly detected for local "
"filesystem\n");
- exit(-1);
+ shutdown_and_exit(cl, -1);
+ }
+
+ hdfsCloseFile(lfs, localFile);
+ }
+
+ {
+ // Pread tests
+
+ exists = hdfsExists(fs, readPath);
+
+ if (exists) {
+ fprintf(stderr, "Failed to validate existence of %s\n", readPath);
+ shutdown_and_exit(cl, -1);
+ }
+
+ preadFile = hdfsOpenFile(fs, readPath, O_RDONLY, 0, 0, 0);
+ if (!preadFile) {
+ fprintf(stderr, "Failed to open %s for reading!\n", readPath);
+ shutdown_and_exit(cl, -1);
+ }
+
+ if (!hdfsFileIsOpenForRead(preadFile)) {
+ fprintf(stderr, "hdfsFileIsOpenForRead: we just opened a file "
+ "with O_RDONLY, and it did not show up as 'open for "
+ "read'\n");
+ shutdown_and_exit(cl, -1);
+ }
+
+ fprintf(stderr, "hdfsAvailable: %d\n", hdfsAvailable(fs, preadFile));
+
+ num_pread_bytes = hdfsPread(fs, preadFile, 0, (void*)buffer, sizeof(buffer));
+ if (strncmp(fileContents, buffer, strlen(fileContents)) != 0) {
+ fprintf(stderr, "Failed to pread (direct). Expected %s but got %s (%d bytes)\n",
+ fileContents, buffer, num_pread_bytes);
+ shutdown_and_exit(cl, -1);
+ }
+ fprintf(stderr, "Pread (direct) following %d bytes:\n%s\n",
+ num_pread_bytes, buffer);
+ memset(buffer, 0, strlen(fileContents) + 1);
+ if (hdfsTell(fs, preadFile) != 0) {
+ fprintf(stderr, "Pread changed position of file\n");
+ shutdown_and_exit(cl, -1);
+ }
+
+ // Test pread midway through the file rather than at the beginning
+ const char *fileContentsChunk = "World!";
+ num_pread_bytes = hdfsPread(fs, preadFile, 7, (void*)buffer, sizeof(buffer));
+ if (strncmp(fileContentsChunk, buffer, strlen(fileContentsChunk)) != 0) {
+ fprintf(stderr, "Failed to pread (direct). Expected %s but got %s (%d bytes)\n",
+ fileContentsChunk, buffer, num_pread_bytes);
+ shutdown_and_exit(cl, -1);
+ }
+ fprintf(stderr, "Pread (direct) following %d bytes:\n%s\n", num_pread_bytes, buffer);
+ memset(buffer, 0, strlen(fileContents) + 1);
+ if (hdfsTell(fs, preadFile) != 0) {
+ fprintf(stderr, "Pread changed position of file\n");
+ shutdown_and_exit(cl, -1);
+ }
+
+ // Disable the direct pread path so that we really go through the slow
+ // read path
+ hdfsFileDisableDirectPread(preadFile);
+
+ if (hdfsFileUsesDirectPread(preadFile)) {
+ fprintf(stderr, "Disabled direct preads, but it is still enabled\n");
+ shutdown_and_exit(cl, -1);
+ }
+
+ if (!hdfsFileUsesDirectRead(preadFile)) {
+ fprintf(stderr, "Disabled direct preads, but direct read was "
+ "disabled as well\n");
+ shutdown_and_exit(cl, -1);
+ }
+
+ num_pread_bytes = hdfsPread(fs, preadFile, 0, (void*)buffer, sizeof(buffer));
+ if (strncmp(fileContents, buffer, strlen(fileContents)) != 0) {
+ fprintf(stderr, "Failed to pread. Expected %s but got %s (%d bytes)\n",
+ fileContents, buffer, num_pread_bytes);
+ shutdown_and_exit(cl, -1);
+ }
+ fprintf(stderr, "Pread following %d bytes:\n%s\n", num_pread_bytes, buffer);
+ memset(buffer, 0, strlen(fileContents) + 1);
+ if (hdfsTell(fs, preadFile) != 0) {
+ fprintf(stderr, "Pread changed position of file\n");
+ shutdown_and_exit(cl, -1);
+ }
+
+ num_pread_bytes = hdfsPread(fs, preadFile, 7, (void*)buffer, sizeof(buffer));
+ if (strncmp(fileContentsChunk, buffer, strlen(fileContentsChunk)) != 0) {
+ fprintf(stderr, "Failed to pread. Expected %s but got %s (%d bytes)\n",
+ fileContentsChunk, buffer, num_pread_bytes);
+ shutdown_and_exit(cl, -1);
+ }
+ fprintf(stderr, "Pread following %d bytes:\n%s\n", num_pread_bytes, buffer);
+ memset(buffer, 0, strlen(fileContents) + 1);
+ if (hdfsTell(fs, preadFile) != 0) {
+ fprintf(stderr, "Pread changed position of file\n");
+ shutdown_and_exit(cl, -1);
+ }
+
+ hdfsCloseFile(fs, preadFile);
+
+ // Test correct behaviour for unsupported filesystems
+ localFile = hdfsOpenFile(lfs, writePath, O_RDONLY, 0, 0, 0);
+
+ if (hdfsFileUsesDirectPread(localFile)) {
+ fprintf(stderr, "Direct pread support incorrectly detected for local "
+ "filesystem\n");
+ shutdown_and_exit(cl, -1);
}
hdfsCloseFile(lfs, localFile);
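
Sketch (not part of the patch): the pread block above pins down the contract the new tests rely on, namely that hdfsPread never moves the stream position and that disabling the direct pread path changes only the code path, not the result. A minimal illustration of the position-preserving contract, assuming an already-connected hdfsFS, an existing /tmp/testfile.txt, and includes of "hdfs/hdfs.h", <assert.h> and <fcntl.h>:

    static void pread_keeps_position(hdfsFS fs)
    {
        char chunk[32];
        hdfsFile f = hdfsOpenFile(fs, "/tmp/testfile.txt", O_RDONLY, 0, 0, 0);
        tOffset before = hdfsTell(fs, f);
        /* Positioned read at offset 7; the stream offset must not change. */
        tSize n = hdfsPread(fs, f, 7, chunk, sizeof(chunk));
        assert(n >= 0);
        assert(hdfsTell(fs, f) == before);
        hdfsCloseFile(fs, f);
    }
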
@@ -425,7 +589,7 @@ int main(int argc, char **argv) {
appendFile = hdfsOpenFile(fs, appendPath, O_WRONLY, 0, 0, 0);
if(!appendFile) {
fprintf(stderr, "Failed to open %s for writing!\n", appendPath);
- exit(-1);
+ shutdown_and_exit(cl, -1);
}
fprintf(stderr, "Opened %s for writing successfully...\n", appendPath);
@@ -435,10 +599,10 @@ int main(int argc, char **argv) {
fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
if (hdfsFlush(fs, appendFile)) {
- fprintf(stderr, "Failed to 'flush' %s\n", appendPath);
- exit(-1);
+ fprintf(stderr, "Failed to 'flush' %s\n", appendPath);
+ shutdown_and_exit(cl, -1);
}
- fprintf(stderr, "Flushed %s successfully!\n", appendPath);
+ fprintf(stderr, "Flushed %s successfully!\n", appendPath);
hdfsCloseFile(fs, appendFile);
@@ -446,7 +610,7 @@ int main(int argc, char **argv) {
appendFile = hdfsOpenFile(fs, appendPath, O_WRONLY|O_APPEND, 0, 0, 0);
if(!appendFile) {
fprintf(stderr, "Failed to open %s for writing!\n", appendPath);
- exit(-1);
+ shutdown_and_exit(cl, -1);
}
fprintf(stderr, "Opened %s for writing successfully...\n", appendPath);
@@ -456,10 +620,10 @@ int main(int argc, char **argv) {
fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
if (hdfsFlush(fs, appendFile)) {
- fprintf(stderr, "Failed to 'flush' %s\n", appendPath);
- exit(-1);
+ fprintf(stderr, "Failed to 'flush' %s\n", appendPath);
+ shutdown_and_exit(cl, -1);
}
- fprintf(stderr, "Flushed %s successfully!\n", appendPath);
+ fprintf(stderr, "Flushed %s successfully!\n", appendPath);
hdfsCloseFile(fs, appendFile);
@@ -472,11 +636,11 @@ int main(int argc, char **argv) {
readFile = hdfsOpenFile(fs, appendPath, O_RDONLY, 0, 0, 0);
if (!readFile) {
fprintf(stderr, "Failed to open %s for reading!\n", appendPath);
- exit(-1);
+ shutdown_and_exit(cl, -1);
}
num_read_bytes = hdfsRead(fs, readFile, (void*)rdbuffer, sizeof(rdbuffer));
- fprintf(stderr, "Read following %d bytes:\n%s\n",
+ fprintf(stderr, "Read following %d bytes:\n%s\n",
num_read_bytes, rdbuffer);
fprintf(stderr, "read == Hello, World %s\n", ((result = (strcmp(rdbuffer, "Hello, World"))) == 0 ? "Success!" : "Failed!"));
@@ -496,16 +660,16 @@ int main(int argc, char **argv) {
// the actual fs user capabilities. Thus just create a file and read
// the owner is correct.
- fs = hdfsConnectAsUserNewInstance("default", 0, tuser);
+ fs = hdfsConnectAsUserNewInstance("localhost", port, tuser);
if(!fs) {
fprintf(stderr, "Oops! Failed to connect to hdfs as user %s!\n",tuser);
- exit(-1);
+ shutdown_and_exit(cl, -1);
}
userFile = hdfsOpenFile(fs, userPath, O_WRONLY|O_CREAT, 0, 0, 0);
if(!userFile) {
fprintf(stderr, "Failed to open %s for writing!\n", userPath);
- exit(-1);
+ shutdown_and_exit(cl, -1);
}
fprintf(stderr, "Opened %s for writing successfully...\n", userPath);
@@ -515,7 +679,7 @@ int main(int argc, char **argv) {
if (hdfsFlush(fs, userFile)) {
fprintf(stderr, "Failed to 'flush' %s\n", userPath);
- exit(-1);
+ shutdown_and_exit(cl, -1);
}
fprintf(stderr, "Flushed %s successfully!\n", userPath);
@@ -528,6 +692,9 @@ int main(int argc, char **argv) {
totalResult += (hdfsDisconnect(fs) != 0);
+ EXPECT_ZERO(nmdShutdown(cl));
+ nmdFree(cl);
+
if (totalResult != 0) {
return -1;
} else {
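
Sketch (not part of the patch): taken together, test_libhdfs_ops.c now follows the same lifecycle as the other libhdfs tests: start a NativeMiniDfsCluster, connect through an hdfsBuilder, and tear the cluster down on every exit path. A condensed outline of that lifecycle using only calls that appear in this diff (error handling elided):

    struct NativeMiniDfsConf conf = { 1 /* doFormat */ };
    struct NativeMiniDfsCluster *cl = nmdCreate(&conf);
    nmdWaitClusterUp(cl);

    struct hdfsBuilder *bld = hdfsNewBuilder();
    hdfsBuilderSetForceNewInstance(bld);
    hdfsBuilderSetNameNode(bld, "localhost");
    hdfsBuilderSetNameNodePort(bld, (tPort) nmdGetNameNodePort(cl));
    hdfsFS fs = hdfsBuilderConnect(bld);

    /* ... run the test cases against fs ... */

    hdfsDisconnect(fs);
    nmdShutdown(cl);
    nmdFree(cl);
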
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/CMakeLists.txt
index 08765f5e28046..a7fb311125110 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/CMakeLists.txt
@@ -35,7 +35,7 @@ hadoop_add_dual_library(hdfs
exception.c
jni_helper.c
hdfs.c
- common/htable.c
+ jclasses.c
${OS_DIR}/mutexes.c
${OS_DIR}/thread_local_storage.c
)
@@ -55,11 +55,9 @@ set_target_properties(hdfs PROPERTIES
SOVERSION ${LIBHDFS_VERSION})
build_libhdfs_test(test_libhdfs_ops hdfs_static test_libhdfs_ops.c)
-link_libhdfs_test(test_libhdfs_ops hdfs_static ${JAVA_JVM_LIBRARY})
-build_libhdfs_test(test_libhdfs_reads hdfs_static test_libhdfs_read.c)
-link_libhdfs_test(test_libhdfs_reads hdfs_static ${JAVA_JVM_LIBRARY})
-build_libhdfs_test(test_libhdfs_write hdfs_static test_libhdfs_write.c)
-link_libhdfs_test(test_libhdfs_write hdfs_static ${JAVA_JVM_LIBRARY})
+link_libhdfs_test(test_libhdfs_ops hdfs_static native_mini_dfs ${JAVA_JVM_LIBRARY})
+add_libhdfs_test(test_libhdfs_ops hdfs_static)
+
build_libhdfs_test(test_libhdfs_threaded hdfs_static expect.c test_libhdfs_threaded.c ${OS_DIR}/thread.c)
link_libhdfs_test(test_libhdfs_threaded hdfs_static native_mini_dfs)
add_libhdfs_test(test_libhdfs_threaded hdfs_static)
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/common/htable.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/common/htable.c
deleted file mode 100644
index 50c89ea9cf707..0000000000000
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/common/htable.c
+++ /dev/null
@@ -1,287 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "common/htable.h"
-
-#include
-#include
-#include
-#include
-#include
-
-struct htable_pair {
- void *key;
- void *val;
-};
-
-/**
- * A hash table which uses linear probing.
- */
-struct htable {
- uint32_t capacity;
- uint32_t used;
- htable_hash_fn_t hash_fun;
- htable_eq_fn_t eq_fun;
- struct htable_pair *elem;
-};
-
-/**
- * An internal function for inserting a value into the hash table.
- *
- * Note: this function assumes that you have made enough space in the table.
- *
- * @param nelem The new element to insert.
- * @param capacity The capacity of the hash table.
- * @param hash_fun The hash function to use.
- * @param key The key to insert.
- * @param val The value to insert.
- */
-static void htable_insert_internal(struct htable_pair *nelem,
- uint32_t capacity, htable_hash_fn_t hash_fun, void *key,
- void *val)
-{
- uint32_t i;
-
- i = hash_fun(key, capacity);
- while (1) {
- if (!nelem[i].key) {
- nelem[i].key = key;
- nelem[i].val = val;
- return;
- }
- i++;
- if (i == capacity) {
- i = 0;
- }
- }
-}
-
-static int htable_realloc(struct htable *htable, uint32_t new_capacity)
-{
- struct htable_pair *nelem;
- uint32_t i, old_capacity = htable->capacity;
- htable_hash_fn_t hash_fun = htable->hash_fun;
-
- nelem = calloc(new_capacity, sizeof(struct htable_pair));
- if (!nelem) {
- return ENOMEM;
- }
- for (i = 0; i < old_capacity; i++) {
- struct htable_pair *pair = htable->elem + i;
- if (pair->key) {
- htable_insert_internal(nelem, new_capacity, hash_fun,
- pair->key, pair->val);
- }
- }
- free(htable->elem);
- htable->elem = nelem;
- htable->capacity = new_capacity;
- return 0;
-}
-
-static uint32_t round_up_to_power_of_2(uint32_t i)
-{
- if (i == 0) {
- return 1;
- }
- i--;
- i |= i >> 1;
- i |= i >> 2;
- i |= i >> 4;
- i |= i >> 8;
- i |= i >> 16;
- i++;
- return i;
-}
-
-struct htable *htable_alloc(uint32_t size,
- htable_hash_fn_t hash_fun, htable_eq_fn_t eq_fun)
-{
- struct htable *htable;
-
- htable = calloc(1, sizeof(*htable));
- if (!htable) {
- return NULL;
- }
- size = round_up_to_power_of_2(size);
- if (size < HTABLE_MIN_SIZE) {
- size = HTABLE_MIN_SIZE;
- }
- htable->hash_fun = hash_fun;
- htable->eq_fun = eq_fun;
- htable->used = 0;
- if (htable_realloc(htable, size)) {
- free(htable);
- return NULL;
- }
- return htable;
-}
-
-void htable_visit(struct htable *htable, visitor_fn_t fun, void *ctx)
-{
- uint32_t i;
-
- for (i = 0; i != htable->capacity; ++i) {
- struct htable_pair *elem = htable->elem + i;
- if (elem->key) {
- fun(ctx, elem->key, elem->val);
- }
- }
-}
-
-void htable_free(struct htable *htable)
-{
- if (htable) {
- free(htable->elem);
- free(htable);
- }
-}
-
-int htable_put(struct htable *htable, void *key, void *val)
-{
- int ret;
- uint32_t nused;
-
- // NULL is not a valid key value.
- // This helps us implement htable_get_internal efficiently, since we know
- // that we can stop when we encounter the first NULL key.
- if (!key) {
- return EINVAL;
- }
- // NULL is not a valid value. Otherwise the results of htable_get would
- // be confusing (does a NULL return mean entry not found, or that the
- // entry was found and was NULL?)
- if (!val) {
- return EINVAL;
- }
- // Re-hash if we have used more than half of the hash table
- nused = htable->used + 1;
- if (nused >= (htable->capacity / 2)) {
- ret = htable_realloc(htable, htable->capacity * 2);
- if (ret)
- return ret;
- }
- htable_insert_internal(htable->elem, htable->capacity,
- htable->hash_fun, key, val);
- htable->used++;
- return 0;
-}
-
-static int htable_get_internal(const struct htable *htable,
- const void *key, uint32_t *out)
-{
- uint32_t start_idx, idx;
-
- start_idx = htable->hash_fun(key, htable->capacity);
- idx = start_idx;
- while (1) {
- struct htable_pair *pair = htable->elem + idx;
- if (!pair->key) {
- // We always maintain the invariant that the entries corresponding
- // to a given key are stored in a contiguous block, not separated
- // by any NULLs. So if we encounter a NULL, our search is over.
- return ENOENT;
- } else if (htable->eq_fun(pair->key, key)) {
- *out = idx;
- return 0;
- }
- idx++;
- if (idx == htable->capacity) {
- idx = 0;
- }
- if (idx == start_idx) {
- return ENOENT;
- }
- }
-}
-
-void *htable_get(const struct htable *htable, const void *key)
-{
- uint32_t idx;
-
- if (htable_get_internal(htable, key, &idx)) {
- return NULL;
- }
- return htable->elem[idx].val;
-}
-
-void htable_pop(struct htable *htable, const void *key,
- void **found_key, void **found_val)
-{
- uint32_t hole, i;
- const void *nkey;
-
- if (htable_get_internal(htable, key, &hole)) {
- *found_key = NULL;
- *found_val = NULL;
- return;
- }
- i = hole;
- htable->used--;
- // We need to maintain the compactness invariant used in
- // htable_get_internal. This invariant specifies that the entries for any
- // given key are never separated by NULLs (although they may be separated
- // by entries for other keys.)
- while (1) {
- i++;
- if (i == htable->capacity) {
- i = 0;
- }
- nkey = htable->elem[i].key;
- if (!nkey) {
- *found_key = htable->elem[hole].key;
- *found_val = htable->elem[hole].val;
- htable->elem[hole].key = NULL;
- htable->elem[hole].val = NULL;
- return;
- } else if (htable->eq_fun(key, nkey)) {
- htable->elem[hole].key = htable->elem[i].key;
- htable->elem[hole].val = htable->elem[i].val;
- hole = i;
- }
- }
-}
-
-uint32_t htable_used(const struct htable *htable)
-{
- return htable->used;
-}
-
-uint32_t htable_capacity(const struct htable *htable)
-{
- return htable->capacity;
-}
-
-uint32_t ht_hash_string(const void *str, uint32_t max)
-{
- const char *s = str;
- uint32_t hash = 0;
-
- while (*s) {
- hash = (hash * 31) + *s;
- s++;
- }
- return hash % max;
-}
-
-int ht_compare_string(const void *a, const void *b)
-{
- return strcmp(a, b) == 0;
-}
-
-// vim: ts=4:sw=4:tw=79:et
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/common/htable.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/common/htable.h
deleted file mode 100644
index 33f1229051582..0000000000000
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/common/htable.h
+++ /dev/null
@@ -1,161 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef HADOOP_CORE_COMMON_HASH_TABLE
-#define HADOOP_CORE_COMMON_HASH_TABLE
-
-#include
-#include
-#include
-
-#define HTABLE_MIN_SIZE 4
-
-struct htable;
-
-/**
- * An HTable hash function.
- *
- * @param key The key.
- * @param capacity The total capacity.
- *
- * @return The hash slot. Must be less than the capacity.
- */
-typedef uint32_t (*htable_hash_fn_t)(const void *key, uint32_t capacity);
-
-/**
- * An HTable equality function. Compares two keys.
- *
- * @param a First key.
- * @param b Second key.
- *
- * @return nonzero if the keys are equal.
- */
-typedef int (*htable_eq_fn_t)(const void *a, const void *b);
-
-/**
- * Allocate a new hash table.
- *
- * @param capacity The minimum suggested starting capacity.
- * @param hash_fun The hash function to use in this hash table.
- * @param eq_fun The equals function to use in this hash table.
- *
- * @return The new hash table on success; NULL on OOM.
- */
-struct htable *htable_alloc(uint32_t capacity, htable_hash_fn_t hash_fun,
- htable_eq_fn_t eq_fun);
-
-typedef void (*visitor_fn_t)(void *ctx, void *key, void *val);
-
-/**
- * Visit all of the entries in the hash table.
- *
- * @param htable The hash table.
- * @param fun The callback function to invoke on each key and value.
- * @param ctx Context pointer to pass to the callback.
- */
-void htable_visit(struct htable *htable, visitor_fn_t fun, void *ctx);
-
-/**
- * Free the hash table.
- *
- * It is up the calling code to ensure that the keys and values inside the
- * table are de-allocated, if that is necessary.
- *
- * @param htable The hash table.
- */
-void htable_free(struct htable *htable);
-
-/**
- * Add an entry to the hash table.
- *
- * @param htable The hash table.
- * @param key The key to add. This cannot be NULL.
- * @param fun The value to add. This cannot be NULL.
- *
- * @return 0 on success;
- * EEXIST if the value already exists in the table;
- * ENOMEM if there is not enough memory to add the element.
- * EFBIG if the hash table has too many entries to fit in 32
- * bits.
- */
-int htable_put(struct htable *htable, void *key, void *val);
-
-/**
- * Get an entry from the hash table.
- *
- * @param htable The hash table.
- * @param key The key to find.
- *
- * @return NULL if there is no such entry; the entry otherwise.
- */
-void *htable_get(const struct htable *htable, const void *key);
-
-/**
- * Get an entry from the hash table and remove it.
- *
- * @param htable The hash table.
- * @param key The key for the entry find and remove.
- * @param found_key (out param) NULL if the entry was not found; the found key
- * otherwise.
- * @param found_val (out param) NULL if the entry was not found; the found
- * value otherwise.
- */
-void htable_pop(struct htable *htable, const void *key,
- void **found_key, void **found_val);
-
-/**
- * Get the number of entries used in the hash table.
- *
- * @param htable The hash table.
- *
- * @return The number of entries used in the hash table.
- */
-uint32_t htable_used(const struct htable *htable);
-
-/**
- * Get the capacity of the hash table.
- *
- * @param htable The hash table.
- *
- * @return The capacity of the hash table.
- */
-uint32_t htable_capacity(const struct htable *htable);
-
-/**
- * Hash a string.
- *
- * @param str The string.
- * @param max Maximum hash value
- *
- * @return A number less than max.
- */
-uint32_t ht_hash_string(const void *str, uint32_t max);
-
-/**
- * Compare two strings.
- *
- * @param a The first string.
- * @param b The second string.
- *
- * @return 1 if the strings are identical; 0 otherwise.
- */
-int ht_compare_string(const void *a, const void *b);
-
-#endif
-
-// vim: ts=4:sw=4:tw=79:et
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/exception.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/exception.c
index bcbb851534d88..fec9a103b4e23 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/exception.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/exception.c
@@ -18,6 +18,7 @@
#include "exception.h"
#include "hdfs/hdfs.h"
+#include "jclasses.h"
#include "jni_helper.h"
#include "platform.h"
@@ -129,9 +130,8 @@ static char* getExceptionUtilString(JNIEnv *env, jthrowable exc, char *methodNam
jvalue jVal;
jstring jStr = NULL;
char *excString = NULL;
- jthr = invokeMethod(env, &jVal, STATIC, NULL,
- "org/apache/commons/lang3/exception/ExceptionUtils",
- methodName, "(Ljava/lang/Throwable;)Ljava/lang/String;", exc);
+ jthr = invokeMethod(env, &jVal, STATIC, NULL, JC_EXCEPTION_UTILS,
+ methodName, "(Ljava/lang/Throwable;)Ljava/lang/String;", exc);
if (jthr) {
destroyLocalReference(env, jthr);
return NULL;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c
index 2d1b7e2fcc2cb..93751961481e7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c
@@ -18,6 +18,7 @@
#include "exception.h"
#include "hdfs/hdfs.h"
+#include "jclasses.h"
#include "jni_helper.h"
#include "platform.h"
@@ -26,23 +27,6 @@
#include
#include
-/* Some frequently used Java paths */
-#define HADOOP_CONF "org/apache/hadoop/conf/Configuration"
-#define HADOOP_PATH "org/apache/hadoop/fs/Path"
-#define HADOOP_LOCALFS "org/apache/hadoop/fs/LocalFileSystem"
-#define HADOOP_FS "org/apache/hadoop/fs/FileSystem"
-#define HADOOP_FSSTATUS "org/apache/hadoop/fs/FsStatus"
-#define HADOOP_BLK_LOC "org/apache/hadoop/fs/BlockLocation"
-#define HADOOP_DFS "org/apache/hadoop/hdfs/DistributedFileSystem"
-#define HADOOP_ISTRM "org/apache/hadoop/fs/FSDataInputStream"
-#define HADOOP_OSTRM "org/apache/hadoop/fs/FSDataOutputStream"
-#define HADOOP_STAT "org/apache/hadoop/fs/FileStatus"
-#define HADOOP_FSPERM "org/apache/hadoop/fs/permission/FsPermission"
-#define JAVA_NET_ISA "java/net/InetSocketAddress"
-#define JAVA_NET_URI "java/net/URI"
-#define JAVA_STRING "java/lang/String"
-#define READ_OPTION "org/apache/hadoop/fs/ReadOption"
-
#define JAVA_VOID "V"
/* Macros for constructing method signatures */
@@ -109,9 +93,8 @@ int hdfsGetHedgedReadMetrics(hdfsFS fs, struct hdfsHedgedReadMetrics **metrics)
}
jthr = invokeMethod(env, &jVal, INSTANCE, jFS,
- HADOOP_DFS,
- "getHedgedReadMetrics",
- "()Lorg/apache/hadoop/hdfs/DFSHedgedReadMetrics;");
+ JC_DISTRIBUTED_FILE_SYSTEM, "getHedgedReadMetrics",
+ "()Lorg/apache/hadoop/hdfs/DFSHedgedReadMetrics;");
if (jthr) {
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsGetHedgedReadMetrics: getHedgedReadMetrics failed");
@@ -126,8 +109,7 @@ int hdfsGetHedgedReadMetrics(hdfsFS fs, struct hdfsHedgedReadMetrics **metrics)
}
jthr = invokeMethod(env, &jVal, INSTANCE, hedgedReadMetrics,
- "org/apache/hadoop/hdfs/DFSHedgedReadMetrics",
- "getHedgedReadOps", "()J");
+ JC_DFS_HEDGED_READ_METRICS, "getHedgedReadOps", "()J");
if (jthr) {
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsGetHedgedReadStatistics: getHedgedReadOps failed");
@@ -136,8 +118,7 @@ int hdfsGetHedgedReadMetrics(hdfsFS fs, struct hdfsHedgedReadMetrics **metrics)
m->hedgedReadOps = jVal.j;
jthr = invokeMethod(env, &jVal, INSTANCE, hedgedReadMetrics,
- "org/apache/hadoop/hdfs/DFSHedgedReadMetrics",
- "getHedgedReadWins", "()J");
+ JC_DFS_HEDGED_READ_METRICS, "getHedgedReadWins", "()J");
if (jthr) {
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsGetHedgedReadStatistics: getHedgedReadWins failed");
@@ -146,8 +127,7 @@ int hdfsGetHedgedReadMetrics(hdfsFS fs, struct hdfsHedgedReadMetrics **metrics)
m->hedgedReadOpsWin = jVal.j;
jthr = invokeMethod(env, &jVal, INSTANCE, hedgedReadMetrics,
- "org/apache/hadoop/hdfs/DFSHedgedReadMetrics",
- "getHedgedReadOpsInCurThread", "()J");
+ JC_DFS_HEDGED_READ_METRICS, "getHedgedReadOpsInCurThread", "()J");
if (jthr) {
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsGetHedgedReadStatistics: getHedgedReadOpsInCurThread failed");
@@ -192,10 +172,9 @@ int hdfsFileGetReadStatistics(hdfsFile file,
ret = EINVAL;
goto done;
}
- jthr = invokeMethod(env, &jVal, INSTANCE, file->file,
- "org/apache/hadoop/hdfs/client/HdfsDataInputStream",
- "getReadStatistics",
- "()Lorg/apache/hadoop/hdfs/ReadStatistics;");
+ jthr = invokeMethod(env, &jVal, INSTANCE, file->file,
+ JC_HDFS_DATA_INPUT_STREAM, "getReadStatistics",
+ "()Lorg/apache/hadoop/hdfs/ReadStatistics;");
if (jthr) {
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsFileGetReadStatistics: getReadStatistics failed");
@@ -208,8 +187,7 @@ int hdfsFileGetReadStatistics(hdfsFile file,
goto done;
}
jthr = invokeMethod(env, &jVal, INSTANCE, readStats,
- "org/apache/hadoop/hdfs/ReadStatistics",
- "getTotalBytesRead", "()J");
+ JC_READ_STATISTICS, "getTotalBytesRead", "()J");
if (jthr) {
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsFileGetReadStatistics: getTotalBytesRead failed");
@@ -218,8 +196,7 @@ int hdfsFileGetReadStatistics(hdfsFile file,
s->totalBytesRead = jVal.j;
jthr = invokeMethod(env, &jVal, INSTANCE, readStats,
- "org/apache/hadoop/hdfs/ReadStatistics",
- "getTotalLocalBytesRead", "()J");
+ JC_READ_STATISTICS, "getTotalLocalBytesRead", "()J");
if (jthr) {
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsFileGetReadStatistics: getTotalLocalBytesRead failed");
@@ -228,8 +205,8 @@ int hdfsFileGetReadStatistics(hdfsFile file,
s->totalLocalBytesRead = jVal.j;
jthr = invokeMethod(env, &jVal, INSTANCE, readStats,
- "org/apache/hadoop/hdfs/ReadStatistics",
- "getTotalShortCircuitBytesRead", "()J");
+ JC_READ_STATISTICS, "getTotalShortCircuitBytesRead",
+ "()J");
if (jthr) {
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsFileGetReadStatistics: getTotalShortCircuitBytesRead failed");
@@ -237,8 +214,8 @@ int hdfsFileGetReadStatistics(hdfsFile file,
}
s->totalShortCircuitBytesRead = jVal.j;
jthr = invokeMethod(env, &jVal, INSTANCE, readStats,
- "org/apache/hadoop/hdfs/ReadStatistics",
- "getTotalZeroCopyBytesRead", "()J");
+ JC_READ_STATISTICS, "getTotalZeroCopyBytesRead",
+ "()J");
if (jthr) {
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsFileGetReadStatistics: getTotalZeroCopyBytesRead failed");
@@ -280,8 +257,8 @@ int hdfsFileClearReadStatistics(hdfsFile file)
goto done;
}
jthr = invokeMethod(env, NULL, INSTANCE, file->file,
- "org/apache/hadoop/hdfs/client/HdfsDataInputStream",
- "clearReadStatistics", "()V");
+ JC_HDFS_DATA_INPUT_STREAM, "clearReadStatistics",
+ "()V");
if (jthr) {
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsFileClearReadStatistics: clearReadStatistics failed");
@@ -324,8 +301,7 @@ int hdfsDisableDomainSocketSecurity(void)
errno = EINTERNAL;
return -1;
}
- jthr = invokeMethod(env, NULL, STATIC, NULL,
- "org/apache/hadoop/net/unix/DomainSocket",
+ jthr = invokeMethod(env, NULL, STATIC, NULL, JC_DOMAIN_SOCKET,
"disableBindPathValidation", "()V");
if (jthr) {
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -363,8 +339,8 @@ static jthrowable constructNewObjectOfPath(JNIEnv *env, const char *path,
if (jthr)
return jthr;
//Construct the org.apache.hadoop.fs.Path object
- jthr = constructNewObjectOfClass(env, &jPath, "org/apache/hadoop/fs/Path",
- "(Ljava/lang/String;)V", jPathString);
+ jthr = constructNewObjectOfCachedClass(env, &jPath, JC_PATH,
+ "(Ljava/lang/String;)V", jPathString);
destroyLocalReference(env, jPathString);
if (jthr)
return jthr;
@@ -383,8 +359,8 @@ static jthrowable hadoopConfGetStr(JNIEnv *env, jobject jConfiguration,
if (jthr)
goto done;
jthr = invokeMethod(env, &jVal, INSTANCE, jConfiguration,
- HADOOP_CONF, "get", JMETHOD1(JPARAM(JAVA_STRING),
- JPARAM(JAVA_STRING)), jkey);
+ JC_CONFIGURATION, "get", JMETHOD1(JPARAM(JAVA_STRING),
+ JPARAM(JAVA_STRING)), jkey);
if (jthr)
goto done;
jRet = jVal.l;
@@ -407,7 +383,8 @@ int hdfsConfGetStr(const char *key, char **val)
ret = EINTERNAL;
goto done;
}
- jthr = constructNewObjectOfClass(env, &jConfiguration, HADOOP_CONF, "()V");
+ jthr = constructNewObjectOfCachedClass(env, &jConfiguration,
+ JC_CONFIGURATION, "()V");
if (jthr) {
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsConfGetStr(%s): new Configuration", key);
@@ -443,8 +420,8 @@ static jthrowable hadoopConfGetInt(JNIEnv *env, jobject jConfiguration,
if (jthr)
return jthr;
jthr = invokeMethod(env, &jVal, INSTANCE, jConfiguration,
- HADOOP_CONF, "getInt", JMETHOD2(JPARAM(JAVA_STRING), "I", "I"),
- jkey, (jint)(*val));
+ JC_CONFIGURATION, "getInt",
+ JMETHOD2(JPARAM(JAVA_STRING), "I", "I"), jkey, (jint)(*val));
destroyLocalReference(env, jkey);
if (jthr)
return jthr;
@@ -464,7 +441,8 @@ int hdfsConfGetInt(const char *key, int32_t *val)
ret = EINTERNAL;
goto done;
}
- jthr = constructNewObjectOfClass(env, &jConfiguration, HADOOP_CONF, "()V");
+ jthr = constructNewObjectOfCachedClass(env, &jConfiguration,
+ JC_CONFIGURATION, "()V");
if (jthr) {
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsConfGetInt(%s): new Configuration", key);
@@ -697,7 +675,8 @@ hdfsFS hdfsBuilderConnect(struct hdfsBuilder *bld)
}
// jConfiguration = new Configuration();
- jthr = constructNewObjectOfClass(env, &jConfiguration, HADOOP_CONF, "()V");
+ jthr = constructNewObjectOfCachedClass(env, &jConfiguration,
+ JC_CONFIGURATION, "()V");
if (jthr) {
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsBuilderConnect(%s)", hdfsBuilderToStr(bld, buf, sizeof(buf)));
@@ -719,9 +698,10 @@ hdfsFS hdfsBuilderConnect(struct hdfsBuilder *bld)
// Get a local filesystem.
if (bld->forceNewInstance) {
// fs = FileSytem#newInstanceLocal(conf);
- jthr = invokeMethod(env, &jVal, STATIC, NULL, HADOOP_FS,
- "newInstanceLocal", JMETHOD1(JPARAM(HADOOP_CONF),
- JPARAM(HADOOP_LOCALFS)), jConfiguration);
+ jthr = invokeMethod(env, &jVal, STATIC, NULL,
+ JC_FILE_SYSTEM, "newInstanceLocal",
+ JMETHOD1(JPARAM(HADOOP_CONF), JPARAM(HADOOP_LOCALFS)),
+ jConfiguration);
if (jthr) {
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsBuilderConnect(%s)",
@@ -731,10 +711,10 @@ hdfsFS hdfsBuilderConnect(struct hdfsBuilder *bld)
jFS = jVal.l;
} else {
// fs = FileSytem#getLocal(conf);
- jthr = invokeMethod(env, &jVal, STATIC, NULL, HADOOP_FS, "getLocal",
- JMETHOD1(JPARAM(HADOOP_CONF),
- JPARAM(HADOOP_LOCALFS)),
- jConfiguration);
+ jthr = invokeMethod(env, &jVal, STATIC, NULL,
+ JC_FILE_SYSTEM, "getLocal",
+ JMETHOD1(JPARAM(HADOOP_CONF), JPARAM(HADOOP_LOCALFS)),
+ jConfiguration);
if (jthr) {
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsBuilderConnect(%s)",
@@ -746,10 +726,10 @@ hdfsFS hdfsBuilderConnect(struct hdfsBuilder *bld)
} else {
if (!strcmp(bld->nn, "default")) {
// jURI = FileSystem.getDefaultUri(conf)
- jthr = invokeMethod(env, &jVal, STATIC, NULL, HADOOP_FS,
- "getDefaultUri",
- "(Lorg/apache/hadoop/conf/Configuration;)Ljava/net/URI;",
- jConfiguration);
+ jthr = invokeMethod(env, &jVal, STATIC, NULL,
+ JC_FILE_SYSTEM, "getDefaultUri",
+ "(Lorg/apache/hadoop/conf/Configuration;)Ljava/net/URI;",
+ jConfiguration);
if (jthr) {
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsBuilderConnect(%s)",
@@ -769,9 +749,9 @@ hdfsFS hdfsBuilderConnect(struct hdfsBuilder *bld)
hdfsBuilderToStr(bld, buf, sizeof(buf)));
goto done;
}
- jthr = invokeMethod(env, &jVal, STATIC, NULL, JAVA_NET_URI,
- "create", "(Ljava/lang/String;)Ljava/net/URI;",
- jURIString);
+ jthr = invokeMethod(env, &jVal, STATIC, NULL,
+ JC_URI, "create",
+ "(Ljava/lang/String;)Ljava/net/URI;", jURIString);
if (jthr) {
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsBuilderConnect(%s)",
@@ -799,11 +779,11 @@ hdfsFS hdfsBuilderConnect(struct hdfsBuilder *bld)
goto done;
}
if (bld->forceNewInstance) {
- jthr = invokeMethod(env, &jVal, STATIC, NULL, HADOOP_FS,
- "newInstance", JMETHOD3(JPARAM(JAVA_NET_URI),
- JPARAM(HADOOP_CONF), JPARAM(JAVA_STRING),
- JPARAM(HADOOP_FS)),
- jURI, jConfiguration, jUserString);
+ jthr = invokeMethod(env, &jVal, STATIC, NULL,
+ JC_FILE_SYSTEM, "newInstance",
+ JMETHOD3(JPARAM(JAVA_NET_URI), JPARAM(HADOOP_CONF),
+ JPARAM(JAVA_STRING), JPARAM(HADOOP_FS)), jURI,
+ jConfiguration, jUserString);
if (jthr) {
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsBuilderConnect(%s)",
@@ -812,10 +792,11 @@ hdfsFS hdfsBuilderConnect(struct hdfsBuilder *bld)
}
jFS = jVal.l;
} else {
- jthr = invokeMethod(env, &jVal, STATIC, NULL, HADOOP_FS, "get",
+ jthr = invokeMethod(env, &jVal, STATIC, NULL,
+ JC_FILE_SYSTEM, "get",
JMETHOD3(JPARAM(JAVA_NET_URI), JPARAM(HADOOP_CONF),
- JPARAM(JAVA_STRING), JPARAM(HADOOP_FS)),
- jURI, jConfiguration, jUserString);
+ JPARAM(JAVA_STRING), JPARAM(HADOOP_FS)), jURI,
+ jConfiguration, jUserString);
if (jthr) {
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsBuilderConnect(%s)",
@@ -877,8 +858,8 @@ int hdfsDisconnect(hdfsFS fs)
return -1;
}
- jthr = invokeMethod(env, NULL, INSTANCE, jFS, HADOOP_FS,
- "close", "()V");
+ jthr = invokeMethod(env, NULL, INSTANCE, jFS, JC_FILE_SYSTEM,
+ "close", "()V");
if (jthr) {
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsDisconnect: FileSystem#close");
@@ -909,8 +890,9 @@ static jthrowable getDefaultBlockSize(JNIEnv *env, jobject jFS,
jthrowable jthr;
jvalue jVal;
- jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
- "getDefaultBlockSize", JMETHOD1(JPARAM(HADOOP_PATH), "J"), jPath);
+ jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
+ "getDefaultBlockSize", JMETHOD1(JPARAM(HADOOP_PATH),
+ "J"), jPath);
if (jthr)
return jthr;
*out = jVal.j;
@@ -945,14 +927,9 @@ struct hdfsStreamBuilder {
struct hdfsStreamBuilder *hdfsStreamBuilderAlloc(hdfsFS fs,
const char *path, int flags)
{
- size_t path_len = strlen(path);
+ int path_len = strlen(path);
struct hdfsStreamBuilder *bld;
- // Check for overflow in path_len
- if (path_len > SIZE_MAX - sizeof(struct hdfsStreamBuilder)) {
- errno = EOVERFLOW;
- return NULL;
- }
// sizeof(hdfsStreamBuilder->path) includes one byte for the string
// terminator
bld = malloc(sizeof(struct hdfsStreamBuilder) + path_len);
@@ -1018,7 +995,7 @@ static hdfsFile hdfsOpenFileImpl(hdfsFS fs, const char *path, int flags,
return f{is|os};
*/
int accmode = flags & O_ACCMODE;
- jstring jStrBufferSize = NULL, jStrReplication = NULL;
+ jstring jStrBufferSize = NULL, jStrReplication = NULL, jCapabilityString = NULL;
jobject jConfiguration = NULL, jPath = NULL, jFile = NULL;
jobject jFS = (jobject)fs;
jthrowable jthr;
@@ -1057,14 +1034,14 @@ static hdfsFile hdfsOpenFileImpl(hdfsFS fs, const char *path, int flags,
}
if (accmode == O_RDONLY) {
- method = "open";
- signature = JMETHOD2(JPARAM(HADOOP_PATH), "I", JPARAM(HADOOP_ISTRM));
+ method = "open";
+ signature = JMETHOD2(JPARAM(HADOOP_PATH), "I", JPARAM(HADOOP_FSDISTRM));
} else if (flags & O_APPEND) {
- method = "append";
- signature = JMETHOD1(JPARAM(HADOOP_PATH), JPARAM(HADOOP_OSTRM));
+ method = "append";
+ signature = JMETHOD1(JPARAM(HADOOP_PATH), JPARAM(HADOOP_FSDOSTRM));
} else {
- method = "create";
- signature = JMETHOD2(JPARAM(HADOOP_PATH), "ZISJ", JPARAM(HADOOP_OSTRM));
+ method = "create";
+ signature = JMETHOD2(JPARAM(HADOOP_PATH), "ZISJ", JPARAM(HADOOP_FSDOSTRM));
}
/* Create an object of org.apache.hadoop.fs.Path */
@@ -1076,8 +1053,8 @@ static hdfsFile hdfsOpenFileImpl(hdfsFS fs, const char *path, int flags,
}
/* Get the Configuration object from the FileSystem object */
- jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
- "getConf", JMETHOD1("", JPARAM(HADOOP_CONF)));
+ jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
+ "getConf", JMETHOD1("", JPARAM(HADOOP_CONF)));
if (jthr) {
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsOpenFile(%s): FileSystem#getConf", path);
@@ -1097,9 +1074,9 @@ static hdfsFile hdfsOpenFileImpl(hdfsFS fs, const char *path, int flags,
}
if (!bufferSize) {
- jthr = invokeMethod(env, &jVal, INSTANCE, jConfiguration,
- HADOOP_CONF, "getInt", "(Ljava/lang/String;I)I",
- jStrBufferSize, 4096);
+ jthr = invokeMethod(env, &jVal, INSTANCE, jConfiguration,
+ JC_CONFIGURATION, "getInt",
+ "(Ljava/lang/String;I)I", jStrBufferSize, 4096);
if (jthr) {
ret = printExceptionAndFree(env, jthr, NOPRINT_EXC_FILE_NOT_FOUND |
NOPRINT_EXC_ACCESS_CONTROL | NOPRINT_EXC_UNRESOLVED_LINK,
@@ -1112,9 +1089,9 @@ static hdfsFile hdfsOpenFileImpl(hdfsFS fs, const char *path, int flags,
if ((accmode == O_WRONLY) && (flags & O_APPEND) == 0) {
if (!replication) {
- jthr = invokeMethod(env, &jVal, INSTANCE, jConfiguration,
- HADOOP_CONF, "getInt", "(Ljava/lang/String;I)I",
- jStrReplication, 1);
+ jthr = invokeMethod(env, &jVal, INSTANCE, jConfiguration,
+ JC_CONFIGURATION, "getInt",
+ "(Ljava/lang/String;I)I", jStrReplication, 1);
if (jthr) {
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsOpenFile(%s): Configuration#getInt(dfs.replication)",
@@ -1130,12 +1107,12 @@ static hdfsFile hdfsOpenFileImpl(hdfsFS fs, const char *path, int flags,
// READ?
if (accmode == O_RDONLY) {
- jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
- method, signature, jPath, jBufferSize);
+ jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
+ method, signature, jPath, jBufferSize);
} else if ((accmode == O_WRONLY) && (flags & O_APPEND)) {
// WRITE/APPEND?
- jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
- method, signature, jPath);
+ jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
+ method, signature, jPath);
} else {
// WRITE/CREATE
jboolean jOverWrite = 1;
@@ -1148,9 +1125,9 @@ static hdfsFile hdfsOpenFileImpl(hdfsFS fs, const char *path, int flags,
goto done;
}
}
- jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
- method, signature, jPath, jOverWrite,
- jBufferSize, jReplication, jBlockSize);
+ jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
+ method, signature, jPath, jOverWrite, jBufferSize,
+ jReplication, jBlockSize);
}
if (jthr) {
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -1176,16 +1153,23 @@ static hdfsFile hdfsOpenFileImpl(hdfsFS fs, const char *path, int flags,
file->flags = 0;
if ((flags & O_WRONLY) == 0) {
- // Try a test read to see if we can do direct reads
- char buf;
- if (readDirect(fs, file, &buf, 0) == 0) {
- // Success - 0-byte read should return 0
+ // Check the StreamCapabilities of jFile to see if we can do direct reads
+ jthr = newJavaStr(env, "in:readbytebuffer", &jCapabilityString);
+ if (jthr) {
+ ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+ "hdfsOpenFile(%s): newJavaStr", path);
+ goto done;
+ }
+ jthr = invokeMethod(env, &jVal, INSTANCE, jFile,
+ JC_FS_DATA_INPUT_STREAM, "hasCapability",
+ "(Ljava/lang/String;)Z", jCapabilityString);
+ if (jthr) {
+ ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+ "hdfsOpenFile(%s): FSDataInputStream#hasCapability", path);
+ goto done;
+ }
+ if (jVal.z) {
file->flags |= HDFS_FILE_SUPPORTS_DIRECT_READ;
- } else if (errno != ENOTSUP) {
- // Unexpected error. Clear it, don't set the direct flag.
- fprintf(stderr,
- "hdfsOpenFile(%s): WARN: Unexpected error %d when testing "
- "for direct read compatibility\n", path, errno);
}
}
ret = 0;
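
Sketch (not part of the patch): with this hunk, direct-read support is decided by asking the opened stream for the "in:readbytebuffer" StreamCapability instead of issuing a zero-byte probe read. From C the outcome is only observable through the test-only helpers in hdfs_test.h; a hypothetical check, assuming an already-connected fs and a readable path:

    hdfsFile f = hdfsOpenFile(fs, "/some/readable/path", O_RDONLY, 0, 0, 0);
    if (f && hdfsFileUsesDirectRead(f)) {
        /* The FSDataInputStream reported "in:readbytebuffer" at open time. */
    }
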
@@ -1195,7 +1179,8 @@ static hdfsFile hdfsOpenFileImpl(hdfsFS fs, const char *path, int flags,
destroyLocalReference(env, jStrReplication);
destroyLocalReference(env, jConfiguration);
destroyLocalReference(env, jPath);
- destroyLocalReference(env, jFile);
+ destroyLocalReference(env, jFile);
+ destroyLocalReference(env, jCapabilityString);
if (ret) {
if (file) {
if (file->file) {
@@ -1241,9 +1226,9 @@ int hdfsTruncateFile(hdfsFS fs, const char* path, tOffset newlength)
return -1;
}
- jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
- "truncate", JMETHOD2(JPARAM(HADOOP_PATH), "J", "Z"),
- jPath, newlength);
+ jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
+ "truncate", JMETHOD2(JPARAM(HADOOP_PATH), "J", "Z"),
+ jPath, newlength);
destroyLocalReference(env, jPath);
if (jthr) {
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -1270,11 +1255,11 @@ int hdfsUnbufferFile(hdfsFile file)
ret = ENOTSUP;
goto done;
}
- jthr = invokeMethod(env, NULL, INSTANCE, file->file, HADOOP_ISTRM,
- "unbuffer", "()V");
+ jthr = invokeMethod(env, NULL, INSTANCE, file->file,
+ JC_FS_DATA_INPUT_STREAM, "unbuffer", "()V");
if (jthr) {
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
- HADOOP_ISTRM "#unbuffer failed:");
+ HADOOP_FSDISTRM "#unbuffer failed:");
goto done;
}
ret = 0;
@@ -1291,7 +1276,7 @@ int hdfsCloseFile(hdfsFS fs, hdfsFile file)
// file.close
//The interface whose 'close' method to be called
- const char *interface;
+ CachedJavaClass cachedJavaClass;
const char *interfaceShortName;
//Caught exception
@@ -1310,11 +1295,14 @@ int hdfsCloseFile(hdfsFS fs, hdfsFile file)
return -1;
}
- interface = (file->type == HDFS_STREAM_INPUT) ?
- HADOOP_ISTRM : HADOOP_OSTRM;
+ if (file->type == HDFS_STREAM_INPUT) {
+ cachedJavaClass = JC_FS_DATA_INPUT_STREAM;
+ } else {
+ cachedJavaClass = JC_FS_DATA_OUTPUT_STREAM;
+ }
- jthr = invokeMethod(env, NULL, INSTANCE, file->file, interface,
- "close", "()V");
+ jthr = invokeMethod(env, NULL, INSTANCE, file->file,
+ cachedJavaClass, "close", "()V");
if (jthr) {
interfaceShortName = (file->type == HDFS_STREAM_INPUT) ?
"FSDataInputStream" : "FSDataOutputStream";
@@ -1358,7 +1346,7 @@ int hdfsExists(hdfsFS fs, const char *path)
"hdfsExists: constructNewObjectOfPath");
return -1;
}
- jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
+ jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
"exists", JMETHOD1(JPARAM(HADOOP_PATH), "Z"), jPath);
destroyLocalReference(env, jPath);
if (jthr) {
@@ -1401,7 +1389,6 @@ tSize hdfsRead(hdfsFS fs, hdfsFile f, void* buffer, tSize length)
{
jobject jInputStream;
jbyteArray jbRarray;
- jint noReadBytes = length;
jvalue jVal;
jthrowable jthr;
JNIEnv* env;
@@ -1440,8 +1427,8 @@ tSize hdfsRead(hdfsFS fs, hdfsFile f, void* buffer, tSize length)
return -1;
}
- jthr = invokeMethod(env, &jVal, INSTANCE, jInputStream, HADOOP_ISTRM,
- "read", "([B)I", jbRarray);
+ jthr = invokeMethod(env, &jVal, INSTANCE, jInputStream,
+ JC_FS_DATA_INPUT_STREAM, "read", "([B)I", jbRarray);
if (jthr) {
destroyLocalReference(env, jbRarray);
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -1457,7 +1444,12 @@ tSize hdfsRead(hdfsFS fs, hdfsFile f, void* buffer, tSize length)
errno = EINTR;
return -1;
}
- (*env)->GetByteArrayRegion(env, jbRarray, 0, noReadBytes, buffer);
+ // Only copy the portion of jbRarray that was actually filled by the call
+ // to FSDataInputStream#read; #read is not guaranteed to fill the entire
+ // buffer; it simply returns the number of bytes read into it. We use that
+ // return value as the length in GetByteArrayRegion to ensure we don't
+ // copy more bytes than were actually read.
+ (*env)->GetByteArrayRegion(env, jbRarray, 0, jVal.i, buffer);
destroyLocalReference(env, jbRarray);
if ((*env)->ExceptionCheck(env)) {
errno = printPendingExceptionAndFree(env, PRINT_EXC_ALL,
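
Sketch (not part of the patch): the comment in the hunk above captures the general contract that a read may return fewer bytes than requested, so only the returned count should be copied out. The same defensive pattern with a plain POSIX read, purely as an illustration (hypothetical fd, scratch and out buffers; assumes <unistd.h> and <string.h>):

    ssize_t n = read(fd, scratch, sizeof(scratch));  /* may be a short read */
    if (n > 0) {
        memcpy(out, scratch, (size_t) n);  /* copy n bytes, not sizeof(scratch) */
    }
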
@@ -1499,7 +1491,8 @@ tSize readDirect(hdfsFS fs, hdfsFile f, void* buffer, tSize length)
}
jthr = invokeMethod(env, &jVal, INSTANCE, jInputStream,
- HADOOP_ISTRM, "read", "(Ljava/nio/ByteBuffer;)I", bb);
+ JC_FS_DATA_INPUT_STREAM, "read",
+ "(Ljava/nio/ByteBuffer;)I", bb);
destroyLocalReference(env, bb);
if (jthr) {
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -1550,8 +1543,9 @@ tSize hdfsPread(hdfsFS fs, hdfsFile f, tOffset position,
"hdfsPread: NewByteArray");
return -1;
}
- jthr = invokeMethod(env, &jVal, INSTANCE, f->file, HADOOP_ISTRM,
- "read", "(J[BII)I", position, jbRarray, 0, length);
+ jthr = invokeMethod(env, &jVal, INSTANCE, f->file,
+ JC_FS_DATA_INPUT_STREAM, "read", "(J[BII)I", position,
+ jbRarray, 0, length);
if (jthr) {
destroyLocalReference(env, jbRarray);
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -1636,7 +1630,8 @@ tSize hdfsWrite(hdfsFS fs, hdfsFile f, const void* buffer, tSize length)
return -1;
}
jthr = invokeMethod(env, NULL, INSTANCE, jOutputStream,
- HADOOP_OSTRM, "write", "([B)V", jbWarray);
+ JC_FS_DATA_OUTPUT_STREAM, "write", "([B)V",
+ jbWarray);
destroyLocalReference(env, jbWarray);
if (jthr) {
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -1671,7 +1666,7 @@ int hdfsSeek(hdfsFS fs, hdfsFile f, tOffset desiredPos)
jInputStream = f->file;
jthr = invokeMethod(env, NULL, INSTANCE, jInputStream,
- HADOOP_ISTRM, "seek", "(J)V", desiredPos);
+ JC_FS_DATA_INPUT_STREAM, "seek", "(J)V", desiredPos);
if (jthr) {
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsSeek(desiredPos=%" PRId64 ")"
@@ -1681,15 +1676,13 @@ int hdfsSeek(hdfsFS fs, hdfsFile f, tOffset desiredPos)
return 0;
}
-
-
tOffset hdfsTell(hdfsFS fs, hdfsFile f)
{
// JAVA EQUIVALENT
// pos = f.getPos();
jobject jStream;
- const char *interface;
+ CachedJavaClass cachedJavaClass;
jvalue jVal;
jthrowable jthr;
@@ -1708,10 +1701,13 @@ tOffset hdfsTell(hdfsFS fs, hdfsFile f)
//Parameters
jStream = f->file;
- interface = (f->type == HDFS_STREAM_INPUT) ?
- HADOOP_ISTRM : HADOOP_OSTRM;
+ if (f->type == HDFS_STREAM_INPUT) {
+ cachedJavaClass = JC_FS_DATA_INPUT_STREAM;
+ } else {
+ cachedJavaClass = JC_FS_DATA_OUTPUT_STREAM;
+ }
jthr = invokeMethod(env, &jVal, INSTANCE, jStream,
- interface, "getPos", "()J");
+ cachedJavaClass, "getPos", "()J");
if (jthr) {
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsTell: %s#getPos",
@@ -1742,7 +1738,7 @@ int hdfsFlush(hdfsFS fs, hdfsFile f)
return -1;
}
jthr = invokeMethod(env, NULL, INSTANCE, f->file,
- HADOOP_OSTRM, "flush", "()V");
+ JC_FS_DATA_OUTPUT_STREAM, "flush", "()V");
if (jthr) {
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsFlush: FSDataInputStream#flush");
@@ -1771,7 +1767,7 @@ int hdfsHFlush(hdfsFS fs, hdfsFile f)
jOutputStream = f->file;
jthr = invokeMethod(env, NULL, INSTANCE, jOutputStream,
- HADOOP_OSTRM, "hflush", "()V");
+ JC_FS_DATA_OUTPUT_STREAM, "hflush", "()V");
if (jthr) {
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsHFlush: FSDataOutputStream#hflush");
@@ -1800,7 +1796,7 @@ int hdfsHSync(hdfsFS fs, hdfsFile f)
jOutputStream = f->file;
jthr = invokeMethod(env, NULL, INSTANCE, jOutputStream,
- HADOOP_OSTRM, "hsync", "()V");
+ JC_FS_DATA_OUTPUT_STREAM, "hsync", "()V");
if (jthr) {
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsHSync: FSDataOutputStream#hsync");
@@ -1834,7 +1830,7 @@ int hdfsAvailable(hdfsFS fs, hdfsFile f)
//Parameters
jInputStream = f->file;
jthr = invokeMethod(env, &jVal, INSTANCE, jInputStream,
- HADOOP_ISTRM, "available", "()I");
+ JC_FS_DATA_INPUT_STREAM, "available", "()I");
if (jthr) {
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsAvailable: FSDataInputStream#available");
@@ -1879,8 +1875,8 @@ static int hdfsCopyImpl(hdfsFS srcFS, const char *src, hdfsFS dstFS,
}
//Create the org.apache.hadoop.conf.Configuration object
- jthr = constructNewObjectOfClass(env, &jConfiguration,
- HADOOP_CONF, "()V");
+ jthr = constructNewObjectOfCachedClass(env, &jConfiguration,
+ JC_CONFIGURATION, "()V");
if (jthr) {
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsCopyImpl: Configuration constructor");
@@ -1888,8 +1884,8 @@ static int hdfsCopyImpl(hdfsFS srcFS, const char *src, hdfsFS dstFS,
}
//FileUtil#copy
- jthr = invokeMethod(env, &jVal, STATIC,
- NULL, "org/apache/hadoop/fs/FileUtil", "copy",
+ jthr = invokeMethod(env, &jVal, STATIC, NULL, JC_FILE_UTIL,
+ "copy",
"(Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;"
"Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;"
"ZLorg/apache/hadoop/conf/Configuration;)Z",
@@ -1955,9 +1951,9 @@ int hdfsDelete(hdfsFS fs, const char *path, int recursive)
return -1;
}
jRecursive = recursive ? JNI_TRUE : JNI_FALSE;
- jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
- "delete", "(Lorg/apache/hadoop/fs/Path;Z)Z",
- jPath, jRecursive);
+ jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
+ "delete", "(Lorg/apache/hadoop/fs/Path;Z)Z", jPath,
+ jRecursive);
destroyLocalReference(env, jPath);
if (jthr) {
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -2009,9 +2005,9 @@ int hdfsRename(hdfsFS fs, const char *oldPath, const char *newPath)
// Rename the file
// TODO: use rename2 here? (See HDFS-3592)
- jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS, "rename",
- JMETHOD2(JPARAM(HADOOP_PATH), JPARAM(HADOOP_PATH), "Z"),
- jOldPath, jNewPath);
+ jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
+ "rename", JMETHOD2(JPARAM(HADOOP_PATH), JPARAM
+ (HADOOP_PATH), "Z"), jOldPath, jNewPath);
if (jthr) {
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsRename(oldPath=%s, newPath=%s): FileSystem#rename",
@@ -2054,9 +2050,8 @@ char* hdfsGetWorkingDirectory(hdfsFS fs, char* buffer, size_t bufferSize)
}
//FileSystem#getWorkingDirectory()
- jthr = invokeMethod(env, &jVal, INSTANCE, jFS,
- HADOOP_FS, "getWorkingDirectory",
- "()Lorg/apache/hadoop/fs/Path;");
+ jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
+ "getWorkingDirectory", "()Lorg/apache/hadoop/fs/Path;");
if (jthr) {
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsGetWorkingDirectory: FileSystem#getWorkingDirectory");
@@ -2071,9 +2066,8 @@ char* hdfsGetWorkingDirectory(hdfsFS fs, char* buffer, size_t bufferSize)
}
//Path#toString()
- jthr = invokeMethod(env, &jVal, INSTANCE, jPath,
- "org/apache/hadoop/fs/Path", "toString",
- "()Ljava/lang/String;");
+ jthr = invokeMethod(env, &jVal, INSTANCE, jPath, JC_PATH, "toString",
+ "()Ljava/lang/String;");
if (jthr) {
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsGetWorkingDirectory: Path#toString");
@@ -2137,9 +2131,9 @@ int hdfsSetWorkingDirectory(hdfsFS fs, const char *path)
}
//FileSystem#setWorkingDirectory()
- jthr = invokeMethod(env, NULL, INSTANCE, jFS, HADOOP_FS,
- "setWorkingDirectory",
- "(Lorg/apache/hadoop/fs/Path;)V", jPath);
+ jthr = invokeMethod(env, NULL, INSTANCE, jFS, JC_FILE_SYSTEM,
+ "setWorkingDirectory", "(Lorg/apache/hadoop/fs/Path;)V",
+ jPath);
destroyLocalReference(env, jPath);
if (jthr) {
errno = printExceptionAndFree(env, jthr, NOPRINT_EXC_ILLEGAL_ARGUMENT,
@@ -2179,9 +2173,8 @@ int hdfsCreateDirectory(hdfsFS fs, const char *path)
//Create the directory
jVal.z = 0;
- jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
- "mkdirs", "(Lorg/apache/hadoop/fs/Path;)Z",
- jPath);
+ jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
+ "mkdirs", "(Lorg/apache/hadoop/fs/Path;)Z", jPath);
destroyLocalReference(env, jPath);
if (jthr) {
errno = printExceptionAndFree(env, jthr,
@@ -2229,9 +2222,9 @@ int hdfsSetReplication(hdfsFS fs, const char *path, int16_t replication)
}
//Create the directory
- jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
- "setReplication", "(Lorg/apache/hadoop/fs/Path;S)Z",
- jPath, replication);
+ jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
+ "setReplication", "(Lorg/apache/hadoop/fs/Path;S)Z",
+ jPath, replication);
destroyLocalReference(env, jPath);
if (jthr) {
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -2292,8 +2285,8 @@ int hdfsChown(hdfsFS fs, const char *path, const char *owner, const char *group)
}
//Create the directory
- jthr = invokeMethod(env, NULL, INSTANCE, jFS, HADOOP_FS,
- "setOwner", JMETHOD3(JPARAM(HADOOP_PATH),
+ jthr = invokeMethod(env, NULL, INSTANCE, jFS, JC_FILE_SYSTEM,
+ "setOwner", JMETHOD3(JPARAM(HADOOP_PATH),
JPARAM(JAVA_STRING), JPARAM(JAVA_STRING), JAVA_VOID),
jPath, jOwner, jGroup);
if (jthr) {
@@ -2337,12 +2330,12 @@ int hdfsChmod(hdfsFS fs, const char *path, short mode)
}
// construct jPerm = FsPermission.createImmutable(short mode);
- jthr = constructNewObjectOfClass(env, &jPermObj,
- HADOOP_FSPERM,"(S)V",jmode);
+ jthr = constructNewObjectOfCachedClass(env, &jPermObj, JC_FS_PERMISSION,
+ "(S)V",jmode);
if (jthr) {
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
- "constructNewObjectOfClass(%s)", HADOOP_FSPERM);
- return -1;
+ "constructNewObjectOfCachedClass(%s)", HADOOP_FSPERM);
+ goto done;
}
//Create an object of org.apache.hadoop.fs.Path
@@ -2354,10 +2347,9 @@ int hdfsChmod(hdfsFS fs, const char *path, short mode)
}
//Create the directory
- jthr = invokeMethod(env, NULL, INSTANCE, jFS, HADOOP_FS,
- "setPermission",
- JMETHOD2(JPARAM(HADOOP_PATH), JPARAM(HADOOP_FSPERM), JAVA_VOID),
- jPath, jPermObj);
+ jthr = invokeMethod(env, NULL, INSTANCE, jFS, JC_FILE_SYSTEM,
+ "setPermission", JMETHOD2(JPARAM(HADOOP_PATH),
+ JPARAM(HADOOP_FSPERM), JAVA_VOID), jPath, jPermObj);
if (jthr) {
ret = printExceptionAndFree(env, jthr,
NOPRINT_EXC_ACCESS_CONTROL | NOPRINT_EXC_FILE_NOT_FOUND |
@@ -2407,9 +2399,9 @@ int hdfsUtime(hdfsFS fs, const char *path, tTime mtime, tTime atime)
jmtime = (mtime == NO_CHANGE) ? -1 : (mtime * (jlong)1000);
jatime = (atime == NO_CHANGE) ? -1 : (atime * (jlong)1000);
- jthr = invokeMethod(env, NULL, INSTANCE, jFS, HADOOP_FS,
- "setTimes", JMETHOD3(JPARAM(HADOOP_PATH), "J", "J", JAVA_VOID),
- jPath, jmtime, jatime);
+ jthr = invokeMethod(env, NULL, INSTANCE, jFS, JC_FILE_SYSTEM,
+ "setTimes", JMETHOD3(JPARAM(HADOOP_PATH), "J", "J",
+ JAVA_VOID), jPath, jmtime, jatime);
destroyLocalReference(env, jPath);
if (jthr) {
errno = printExceptionAndFree(env, jthr,
@@ -2485,6 +2477,8 @@ int hadoopRzOptionsSetByteBufferPool(
JNIEnv *env;
jthrowable jthr;
jobject byteBufferPool = NULL;
+ jobject globalByteBufferPool = NULL;
+ int ret;
env = getJNIEnv();
if (!env) {
@@ -2501,15 +2495,37 @@ int hadoopRzOptionsSetByteBufferPool(
if (jthr) {
printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hadoopRzOptionsSetByteBufferPool(className=%s): ", className);
- errno = EINVAL;
- return -1;
+ ret = EINVAL;
+ goto done;
}
- }
- if (opts->byteBufferPool) {
- // Delete any previous ByteBufferPool we had.
+ // Only set opts->byteBufferPool if creating a global reference is
+ // successful
+ globalByteBufferPool = (*env)->NewGlobalRef(env, byteBufferPool);
+ if (!globalByteBufferPool) {
+ printPendingExceptionAndFree(env, PRINT_EXC_ALL,
+ "hadoopRzOptionsSetByteBufferPool(className=%s): ",
+ className);
+ ret = EINVAL;
+ goto done;
+ }
+ // Delete any previous ByteBufferPool we had before setting a new one.
+ if (opts->byteBufferPool) {
+ (*env)->DeleteGlobalRef(env, opts->byteBufferPool);
+ }
+ opts->byteBufferPool = globalByteBufferPool;
+ } else if (opts->byteBufferPool) {
+ // If the specified className is NULL, delete any previous
+ // ByteBufferPool we had.
(*env)->DeleteGlobalRef(env, opts->byteBufferPool);
+ opts->byteBufferPool = NULL;
+ }
+ ret = 0;
+done:
+ destroyLocalReference(env, byteBufferPool);
+ if (ret) {
+ errno = ret;
+ return -1;
}
- opts->byteBufferPool = byteBufferPool;
return 0;
}
@@ -2549,28 +2565,28 @@ static jthrowable hadoopRzOptionsGetEnumSet(JNIEnv *env,
goto done;
}
if (opts->skipChecksums) {
- jthr = fetchEnumInstance(env, READ_OPTION,
+ jthr = fetchEnumInstance(env, HADOOP_RO,
"SKIP_CHECKSUMS", &enumInst);
if (jthr) {
goto done;
}
- jthr = invokeMethod(env, &jVal, STATIC, NULL,
- "java/util/EnumSet", "of",
- "(Ljava/lang/Enum;)Ljava/util/EnumSet;", enumInst);
+ jthr = invokeMethod(env, &jVal, STATIC, NULL, JC_ENUM_SET,
+ "of", "(Ljava/lang/Enum;)Ljava/util/EnumSet;", enumInst);
if (jthr) {
goto done;
}
enumSetObj = jVal.l;
} else {
- jclass clazz = (*env)->FindClass(env, READ_OPTION);
+ jclass clazz = (*env)->FindClass(env, HADOOP_RO);
if (!clazz) {
- jthr = newRuntimeError(env, "failed "
- "to find class for %s", READ_OPTION);
+ jthr = getPendingExceptionAndClear(env);
+ goto done;
+ }
+ jthr = invokeMethod(env, &jVal, STATIC, NULL, JC_ENUM_SET,
+ "noneOf", "(Ljava/lang/Class;)Ljava/util/EnumSet;", clazz);
+ if (jthr) {
goto done;
}
- jthr = invokeMethod(env, &jVal, STATIC, NULL,
- "java/util/EnumSet", "noneOf",
- "(Ljava/lang/Class;)Ljava/util/EnumSet;", clazz);
enumSetObj = jVal.l;
}
// create global ref
@@ -2599,7 +2615,7 @@ static int hadoopReadZeroExtractBuffer(JNIEnv *env,
jarray array = NULL;
jthr = invokeMethod(env, &jVal, INSTANCE, buffer->byteBuffer,
- "java/nio/ByteBuffer", "remaining", "()I");
+ JC_BYTE_BUFFER, "remaining", "()I");
if (jthr) {
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hadoopReadZeroExtractBuffer: ByteBuffer#remaining failed: ");
@@ -2607,7 +2623,7 @@ static int hadoopReadZeroExtractBuffer(JNIEnv *env,
}
buffer->length = jVal.i;
jthr = invokeMethod(env, &jVal, INSTANCE, buffer->byteBuffer,
- "java/nio/ByteBuffer", "position", "()I");
+ JC_BYTE_BUFFER, "position", "()I");
if (jthr) {
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hadoopReadZeroExtractBuffer: ByteBuffer#position failed: ");
@@ -2638,7 +2654,7 @@ static int hadoopReadZeroExtractBuffer(JNIEnv *env,
}
// Get the backing array object of this buffer.
jthr = invokeMethod(env, &jVal, INSTANCE, buffer->byteBuffer,
- "java/nio/ByteBuffer", "array", "()[B");
+ JC_BYTE_BUFFER, "array", "()[B");
if (jthr) {
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hadoopReadZeroExtractBuffer: ByteBuffer#array failed: ");
@@ -2691,6 +2707,7 @@ static int translateZCRException(JNIEnv *env, jthrowable exc)
}
if (!strcmp(className, "java.lang.UnsupportedOperationException")) {
ret = EPROTONOSUPPORT;
+ destroyLocalReference(env, exc);
goto done;
}
ret = printExceptionAndFree(env, exc, PRINT_EXC_ALL,
@@ -2731,9 +2748,10 @@ struct hadoopRzBuffer* hadoopReadZero(hdfsFile file,
"hadoopReadZero: hadoopRzOptionsGetEnumSet failed: ");
goto done;
}
- jthr = invokeMethod(env, &jVal, INSTANCE, file->file, HADOOP_ISTRM, "read",
- "(Lorg/apache/hadoop/io/ByteBufferPool;ILjava/util/EnumSet;)"
- "Ljava/nio/ByteBuffer;", opts->byteBufferPool, maxLength, enumSet);
+ jthr = invokeMethod(env, &jVal, INSTANCE, file->file,
+ JC_FS_DATA_INPUT_STREAM, "read",
+ "(Lorg/apache/hadoop/io/ByteBufferPool;ILjava/util/EnumSet;)"
+ "Ljava/nio/ByteBuffer;", opts->byteBufferPool, maxLength, enumSet);
if (jthr) {
ret = translateZCRException(env, jthr);
goto done;
@@ -2796,8 +2814,8 @@ void hadoopRzBufferFree(hdfsFile file, struct hadoopRzBuffer *buffer)
}
if (buffer->byteBuffer) {
jthr = invokeMethod(env, &jVal, INSTANCE, file->file,
- HADOOP_ISTRM, "releaseBuffer",
- "(Ljava/nio/ByteBuffer;)V", buffer->byteBuffer);
+ JC_FS_DATA_INPUT_STREAM, "releaseBuffer",
+ "(Ljava/nio/ByteBuffer;)V", buffer->byteBuffer);
if (jthr) {
printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hadoopRzBufferFree: releaseBuffer failed: ");
@@ -2846,8 +2864,8 @@ hdfsGetHosts(hdfsFS fs, const char *path, tOffset start, tOffset length)
"hdfsGetHosts(path=%s): constructNewObjectOfPath", path);
goto done;
}
- jthr = invokeMethod(env, &jFSVal, INSTANCE, jFS,
- HADOOP_FS, "getFileStatus", "(Lorg/apache/hadoop/fs/Path;)"
+ jthr = invokeMethod(env, &jFSVal, INSTANCE, jFS, JC_FILE_SYSTEM,
+ "getFileStatus", "(Lorg/apache/hadoop/fs/Path;)"
"Lorg/apache/hadoop/fs/FileStatus;", jPath);
if (jthr) {
ret = printExceptionAndFree(env, jthr, NOPRINT_EXC_FILE_NOT_FOUND,
@@ -2859,11 +2877,11 @@ hdfsGetHosts(hdfsFS fs, const char *path, tOffset start, tOffset length)
jFileStatus = jFSVal.l;
//org.apache.hadoop.fs.FileSystem#getFileBlockLocations
- jthr = invokeMethod(env, &jVal, INSTANCE, jFS,
- HADOOP_FS, "getFileBlockLocations",
- "(Lorg/apache/hadoop/fs/FileStatus;JJ)"
- "[Lorg/apache/hadoop/fs/BlockLocation;",
- jFileStatus, start, length);
+ jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
+ "getFileBlockLocations",
+ "(Lorg/apache/hadoop/fs/FileStatus;JJ)"
+ "[Lorg/apache/hadoop/fs/BlockLocation;", jFileStatus, start,
+ length);
if (jthr) {
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsGetHosts(path=%s, start=%"PRId64", length=%"PRId64"):"
@@ -2890,15 +2908,17 @@ hdfsGetHosts(hdfsFS fs, const char *path, tOffset start, tOffset length)
for (i = 0; i < jNumFileBlocks; ++i) {
jFileBlock =
(*env)->GetObjectArrayElement(env, jBlockLocations, i);
- if (!jFileBlock) {
- ret = printPendingExceptionAndFree(env, PRINT_EXC_ALL,
+ jthr = (*env)->ExceptionOccurred(env);
+ if (jthr || !jFileBlock) {
+ ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsGetHosts(path=%s, start=%"PRId64", length=%"PRId64"):"
"GetObjectArrayElement(%d)", path, start, length, i);
goto done;
}
- jthr = invokeMethod(env, &jVal, INSTANCE, jFileBlock, HADOOP_BLK_LOC,
- "getHosts", "()[Ljava/lang/String;");
+ jthr = invokeMethod(env, &jVal, INSTANCE, jFileBlock,
+ JC_BLOCK_LOCATION, "getHosts",
+ "()[Ljava/lang/String;");
if (jthr) {
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsGetHosts(path=%s, start=%"PRId64", length=%"PRId64"):"
@@ -2924,8 +2944,9 @@ hdfsGetHosts(hdfsFS fs, const char *path, tOffset start, tOffset length)
//Now parse each hostname
for (j = 0; j < jNumBlockHosts; ++j) {
jHost = (*env)->GetObjectArrayElement(env, jFileBlockHosts, j);
- if (!jHost) {
- ret = printPendingExceptionAndFree(env, PRINT_EXC_ALL,
+ jthr = (*env)->ExceptionOccurred(env);
+ if (jthr || !jHost) {
+ ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsGetHosts(path=%s, start=%"PRId64", length=%"PRId64"): "
"NewByteArray", path, start, length);
goto done;
@@ -3002,8 +3023,8 @@ tOffset hdfsGetDefaultBlockSize(hdfsFS fs)
}
//FileSystem#getDefaultBlockSize()
- jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
- "getDefaultBlockSize", "()J");
+ jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
+ "getDefaultBlockSize", "()J");
if (jthr) {
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsGetDefaultBlockSize: FileSystem#getDefaultBlockSize");
@@ -3066,16 +3087,16 @@ tOffset hdfsGetCapacity(hdfsFS fs)
}
//FileSystem#getStatus
- jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
- "getStatus", "()Lorg/apache/hadoop/fs/FsStatus;");
+ jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
+ "getStatus", "()Lorg/apache/hadoop/fs/FsStatus;");
if (jthr) {
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsGetCapacity: FileSystem#getStatus");
return -1;
}
fss = (jobject)jVal.l;
- jthr = invokeMethod(env, &jVal, INSTANCE, fss, HADOOP_FSSTATUS,
- "getCapacity", "()J");
+ jthr = invokeMethod(env, &jVal, INSTANCE, fss,
+ JC_FS_STATUS, "getCapacity", "()J");
destroyLocalReference(env, fss);
if (jthr) {
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -3106,16 +3127,16 @@ tOffset hdfsGetUsed(hdfsFS fs)
}
//FileSystem#getStatus
- jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
- "getStatus", "()Lorg/apache/hadoop/fs/FsStatus;");
+ jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
+ "getStatus", "()Lorg/apache/hadoop/fs/FsStatus;");
if (jthr) {
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsGetUsed: FileSystem#getStatus");
return -1;
}
fss = (jobject)jVal.l;
- jthr = invokeMethod(env, &jVal, INSTANCE, fss, HADOOP_FSSTATUS,
- "getUsed", "()J");
+ jthr = invokeMethod(env, &jVal, INSTANCE, fss, JC_FS_STATUS,
+ "getUsed", "()J");
destroyLocalReference(env, fss);
if (jthr) {
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -3173,46 +3194,46 @@ getFileInfoFromStat(JNIEnv *env, jobject jStat, hdfsFileInfo *fileInfo)
struct hdfsExtendedFileInfo *extInfo;
size_t extOffset;
- jthr = invokeMethod(env, &jVal, INSTANCE, jStat,
- HADOOP_STAT, "isDir", "()Z");
+ jthr = invokeMethod(env, &jVal, INSTANCE, jStat, JC_FILE_STATUS, "isDir",
+ "()Z");
if (jthr)
goto done;
fileInfo->mKind = jVal.z ? kObjectKindDirectory : kObjectKindFile;
- jthr = invokeMethod(env, &jVal, INSTANCE, jStat,
- HADOOP_STAT, "getReplication", "()S");
+ jthr = invokeMethod(env, &jVal, INSTANCE, jStat, JC_FILE_STATUS,
+ "getReplication", "()S");
if (jthr)
goto done;
fileInfo->mReplication = jVal.s;
- jthr = invokeMethod(env, &jVal, INSTANCE, jStat,
- HADOOP_STAT, "getBlockSize", "()J");
+ jthr = invokeMethod(env, &jVal, INSTANCE, jStat, JC_FILE_STATUS,
+ "getBlockSize", "()J");
if (jthr)
goto done;
fileInfo->mBlockSize = jVal.j;
- jthr = invokeMethod(env, &jVal, INSTANCE, jStat,
- HADOOP_STAT, "getModificationTime", "()J");
+ jthr = invokeMethod(env, &jVal, INSTANCE, jStat, JC_FILE_STATUS,
+ "getModificationTime", "()J");
if (jthr)
goto done;
fileInfo->mLastMod = jVal.j / 1000;
- jthr = invokeMethod(env, &jVal, INSTANCE, jStat,
- HADOOP_STAT, "getAccessTime", "()J");
+ jthr = invokeMethod(env, &jVal, INSTANCE, jStat, JC_FILE_STATUS,
+ "getAccessTime", "()J");
if (jthr)
goto done;
fileInfo->mLastAccess = (tTime) (jVal.j / 1000);
if (fileInfo->mKind == kObjectKindFile) {
- jthr = invokeMethod(env, &jVal, INSTANCE, jStat,
- HADOOP_STAT, "getLen", "()J");
+ jthr = invokeMethod(env, &jVal, INSTANCE, jStat, JC_FILE_STATUS,
+ "getLen", "()J");
if (jthr)
goto done;
fileInfo->mSize = jVal.j;
}
- jthr = invokeMethod(env, &jVal, INSTANCE, jStat, HADOOP_STAT,
- "getPath", "()Lorg/apache/hadoop/fs/Path;");
+ jthr = invokeMethod(env, &jVal, INSTANCE, jStat, JC_FILE_STATUS,
+ "getPath", "()Lorg/apache/hadoop/fs/Path;");
if (jthr)
goto done;
jPath = jVal.l;
@@ -3222,8 +3243,8 @@ getFileInfoFromStat(JNIEnv *env, jobject jStat, hdfsFileInfo *fileInfo)
goto done;
}
- jthr = invokeMethod(env, &jVal, INSTANCE, jPath, HADOOP_PATH,
- "toString", "()Ljava/lang/String;");
+ jthr = invokeMethod(env, &jVal, INSTANCE, jPath, JC_PATH, "toString",
+ "()Ljava/lang/String;");
if (jthr)
goto done;
jPathName = jVal.l;
@@ -3235,8 +3256,8 @@ getFileInfoFromStat(JNIEnv *env, jobject jStat, hdfsFileInfo *fileInfo)
}
fileInfo->mName = strdup(cPathName);
(*env)->ReleaseStringUTFChars(env, jPathName, cPathName);
- jthr = invokeMethod(env, &jVal, INSTANCE, jStat, HADOOP_STAT,
- "getOwner", "()Ljava/lang/String;");
+ jthr = invokeMethod(env, &jVal, INSTANCE, jStat, JC_FILE_STATUS, "getOwner",
+ "()Ljava/lang/String;");
if (jthr)
goto done;
jUserName = jVal.l;
@@ -3256,16 +3277,16 @@ getFileInfoFromStat(JNIEnv *env, jobject jStat, hdfsFileInfo *fileInfo)
(*env)->ReleaseStringUTFChars(env, jUserName, cUserName);
extInfo = getExtendedFileInfo(fileInfo);
memset(extInfo, 0, sizeof(*extInfo));
- jthr = invokeMethod(env, &jVal, INSTANCE, jStat,
- HADOOP_STAT, "isEncrypted", "()Z");
+ jthr = invokeMethod(env, &jVal, INSTANCE, jStat, JC_FILE_STATUS,
+ "isEncrypted", "()Z");
if (jthr) {
goto done;
}
if (jVal.z == JNI_TRUE) {
extInfo->flags |= HDFS_EXTENDED_FILE_INFO_ENCRYPTED;
}
- jthr = invokeMethod(env, &jVal, INSTANCE, jStat, HADOOP_STAT,
- "getGroup", "()Ljava/lang/String;");
+ jthr = invokeMethod(env, &jVal, INSTANCE, jStat, JC_FILE_STATUS,
+ "getGroup", "()Ljava/lang/String;");
if (jthr)
goto done;
jGroupName = jVal.l;
@@ -3277,19 +3298,19 @@ getFileInfoFromStat(JNIEnv *env, jobject jStat, hdfsFileInfo *fileInfo)
fileInfo->mGroup = strdup(cGroupName);
(*env)->ReleaseStringUTFChars(env, jGroupName, cGroupName);
- jthr = invokeMethod(env, &jVal, INSTANCE, jStat, HADOOP_STAT,
+ jthr = invokeMethod(env, &jVal, INSTANCE, jStat, JC_FILE_STATUS,
"getPermission",
"()Lorg/apache/hadoop/fs/permission/FsPermission;");
if (jthr)
goto done;
if (jVal.l == NULL) {
jthr = newRuntimeError(env, "%s#getPermission returned NULL!",
- HADOOP_STAT);
+ HADOOP_FILESTAT);
goto done;
}
jPermission = jVal.l;
- jthr = invokeMethod(env, &jVal, INSTANCE, jPermission, HADOOP_FSPERM,
- "toShort", "()S");
+ jthr = invokeMethod(env, &jVal, INSTANCE, jPermission,
+ JC_FS_PERMISSION, "toShort", "()S");
if (jthr)
goto done;
fileInfo->mPermissions = jVal.s;
@@ -3303,7 +3324,6 @@ getFileInfoFromStat(JNIEnv *env, jobject jStat, hdfsFileInfo *fileInfo)
destroyLocalReference(env, jUserName);
destroyLocalReference(env, jGroupName);
destroyLocalReference(env, jPermission);
- destroyLocalReference(env, jPath);
return jthr;
}
@@ -3323,18 +3343,17 @@ getFileInfo(JNIEnv *env, jobject jFS, jobject jPath, hdfsFileInfo **fileInfo)
jvalue jVal;
jthrowable jthr;
- jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
- "exists", JMETHOD1(JPARAM(HADOOP_PATH), "Z"),
- jPath);
+ jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM, "exists",
+ JMETHOD1(JPARAM(HADOOP_PATH), "Z"), jPath);
if (jthr)
return jthr;
if (jVal.z == 0) {
*fileInfo = NULL;
return NULL;
}
- jthr = invokeMethod(env, &jVal, INSTANCE, jFS,
- HADOOP_FS, "getFileStatus",
- JMETHOD1(JPARAM(HADOOP_PATH), JPARAM(HADOOP_STAT)), jPath);
+ jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
+ "getFileStatus", JMETHOD1(JPARAM(HADOOP_PATH), JPARAM
+ (HADOOP_FILESTAT)), jPath);
if (jthr)
return jthr;
jStat = jVal.l;
@@ -3384,9 +3403,9 @@ hdfsFileInfo* hdfsListDirectory(hdfsFS fs, const char *path, int *numEntries)
goto done;
}
- jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_DFS, "listStatus",
- JMETHOD1(JPARAM(HADOOP_PATH), JARRPARAM(HADOOP_STAT)),
- jPath);
+ jthr = invokeMethod(env, &jVal, INSTANCE, jFS,
+ JC_DISTRIBUTED_FILE_SYSTEM, "listStatus",
+ JMETHOD1(JPARAM(HADOOP_PATH), JARRPARAM(HADOOP_FILESTAT)), jPath);
if (jthr) {
ret = printExceptionAndFree(env, jthr,
NOPRINT_EXC_ACCESS_CONTROL | NOPRINT_EXC_FILE_NOT_FOUND |
@@ -3413,8 +3432,9 @@ hdfsFileInfo* hdfsListDirectory(hdfsFS fs, const char *path, int *numEntries)
//Save path information in pathList
for (i=0; i < jPathListSize; ++i) {
tmpStat = (*env)->GetObjectArrayElement(env, jPathList, i);
- if (!tmpStat) {
- ret = printPendingExceptionAndFree(env, PRINT_EXC_ALL,
+ jthr = (*env)->ExceptionOccurred(env);
+ if (jthr || !tmpStat) {
+ ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsListDirectory(%s): GetObjectArrayElement(%d out of %d)",
path, i, jPathListSize);
goto done;
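The hdfs.c hunks above all make the same substitution: the class-name string passed to invokeMethod is replaced by a CachedJavaClass enum value, so the jclass is resolved once at JVM startup instead of through FindClass on every call. A minimal sketch of the resulting calling convention (exampleExists is a hypothetical wrapper, not part of the patch, and error reporting is trimmed):

#include <jni.h>
#include "jclasses.h"
#include "jni_helper.h"

/* Hypothetical helper: FileSystem#exists via the cached jclass. */
static int exampleExists(JNIEnv *env, jobject jFS, jobject jPath)
{
    jvalue jVal;
    jthrowable jthr;

    /* JC_FILE_SYSTEM replaces the old HADOOP_FS string, so no FindClass
     * call happens on this hot path. */
    jthr = invokeMethod(env, &jVal, INSTANCE, jFS, JC_FILE_SYSTEM,
            "exists", "(Lorg/apache/hadoop/fs/Path;)Z", jPath);
    if (jthr) {
        return -1; /* the caller would print and translate the exception */
    }
    return jVal.z ? 1 : 0;
}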
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/include/hdfs/hdfs.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/include/hdfs/hdfs.h
index 7e45634d4e02b..e58a6232d205a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/include/hdfs/hdfs.h
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/include/hdfs/hdfs.h
@@ -600,7 +600,8 @@ extern "C" {
tSize hdfsRead(hdfsFS fs, hdfsFile file, void* buffer, tSize length);
/**
- * hdfsPread - Positional read of data from an open file.
+ * hdfsPread - Positional read of data from an open file. Reads at most
+ * 'length' bytes into the buffer.
* @param fs The configured filesystem handle.
* @param file The file handle.
* @param position Position from which to read
@@ -612,6 +613,24 @@ extern "C" {
tSize hdfsPread(hdfsFS fs, hdfsFile file, tOffset position,
void* buffer, tSize length);
+ /**
+ * hdfsPreadFully - Positional read of data from an open file. Reads
+ * exactly 'length' bytes into the buffer. Unlike hdfsRead and hdfsPread,
+ * this method does not return the number of bytes read; it either fills
+ * the entire buffer or fails. If the end of the file is reached before
+ * 'length' bytes have been read, an exception is thrown and errno is set
+ * to EINTR.
+ * @param fs The configured filesystem handle.
+ * @param file The file handle.
+ * @param position Position from which to read
+ * @param buffer The buffer to copy read bytes into.
+ * @param length The length of the buffer.
+ * @return Returns 0 on success, -1 on error.
+ */
+ LIBHDFS_EXTERNAL
+ int hdfsPreadFully(hdfsFS fs, hdfsFile file, tOffset position,
+ void* buffer, tSize length);
+
/**
* hdfsWrite - Write data into an open file.
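A short usage sketch for the hdfsPreadFully call declared above, assuming an already-connected hdfsFS and an open hdfsFile; readHeader is a hypothetical helper, not part of the patch:

#include <errno.h>
#include <stdio.h>
#include "hdfs/hdfs.h"

/* Hypothetical helper: read the first 4096 bytes of a file, or fail. */
static int readHeader(hdfsFS fs, hdfsFile file)
{
    char buf[4096];

    /* hdfsPread may return fewer bytes than requested and has to be
     * retried in a loop; hdfsPreadFully either fills the whole buffer
     * or returns -1 (with errno set, e.g. EINTR at end of file). */
    if (hdfsPreadFully(fs, file, 0, buf, (tSize) sizeof(buf)) != 0) {
        fprintf(stderr, "hdfsPreadFully failed, errno=%d\n", errno);
        return -1;
    }
    return 0;
}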
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jclasses.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jclasses.c
new file mode 100644
index 0000000000000..cf880e91b7596
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jclasses.c
@@ -0,0 +1,136 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "exception.h"
+#include "jclasses.h"
+#include "jni_helper.h"
+#include "os/mutexes.h"
+
+#include <assert.h>
+
+/**
+ * Whether initCachedClasses has been called or not. Protected by the mutex
+ * jclassInitMutex.
+ */
+static int jclassesInitialized = 0;
+
+typedef struct {
+ jclass javaClass;
+ const char *className;
+} javaClassAndName;
+
+/**
+ * A collection of commonly used jclass objects that are used throughout
+ * libhdfs. The jclasses are loaded immediately after the JVM is created (see
+ * initCachedClasses). The array is indexed using CachedJavaClass.
+ */
+javaClassAndName cachedJavaClasses[NUM_CACHED_CLASSES];
+
+/**
+ * Helper method that creates and sets a jclass object given a class name.
+ * Returns a jthrowable on error, NULL otherwise.
+ */
+static jthrowable initCachedClass(JNIEnv *env, const char *className,
+ jclass *cachedJclass) {
+ assert(className != NULL && "Found a CachedJavaClass without a class "
+ "name");
+ jthrowable jthr = NULL;
+ jclass tempLocalClassRef;
+ tempLocalClassRef = (*env)->FindClass(env, className);
+ if (!tempLocalClassRef) {
+ jthr = getPendingExceptionAndClear(env);
+ goto done;
+ }
+ *cachedJclass = (jclass) (*env)->NewGlobalRef(env, tempLocalClassRef);
+ if (!*cachedJclass) {
+ jthr = getPendingExceptionAndClear(env);
+ goto done;
+ }
+done:
+ destroyLocalReference(env, tempLocalClassRef);
+ return jthr;
+}
+
+jthrowable initCachedClasses(JNIEnv* env) {
+ mutexLock(&jclassInitMutex);
+ if (!jclassesInitialized) {
+ // Set all the class names
+ cachedJavaClasses[JC_CONFIGURATION].className =
+ "org/apache/hadoop/conf/Configuration";
+ cachedJavaClasses[JC_PATH].className =
+ "org/apache/hadoop/fs/Path";
+ cachedJavaClasses[JC_FILE_SYSTEM].className =
+ "org/apache/hadoop/fs/FileSystem";
+ cachedJavaClasses[JC_FS_STATUS].className =
+ "org/apache/hadoop/fs/FsStatus";
+ cachedJavaClasses[JC_FILE_UTIL].className =
+ "org/apache/hadoop/fs/FileUtil";
+ cachedJavaClasses[JC_BLOCK_LOCATION].className =
+ "org/apache/hadoop/fs/BlockLocation";
+ cachedJavaClasses[JC_DFS_HEDGED_READ_METRICS].className =
+ "org/apache/hadoop/hdfs/DFSHedgedReadMetrics";
+ cachedJavaClasses[JC_DISTRIBUTED_FILE_SYSTEM].className =
+ "org/apache/hadoop/hdfs/DistributedFileSystem";
+ cachedJavaClasses[JC_FS_DATA_INPUT_STREAM].className =
+ "org/apache/hadoop/fs/FSDataInputStream";
+ cachedJavaClasses[JC_FS_DATA_OUTPUT_STREAM].className =
+ "org/apache/hadoop/fs/FSDataOutputStream";
+ cachedJavaClasses[JC_FILE_STATUS].className =
+ "org/apache/hadoop/fs/FileStatus";
+ cachedJavaClasses[JC_FS_PERMISSION].className =
+ "org/apache/hadoop/fs/permission/FsPermission";
+ cachedJavaClasses[JC_READ_STATISTICS].className =
+ "org/apache/hadoop/hdfs/ReadStatistics";
+ cachedJavaClasses[JC_HDFS_DATA_INPUT_STREAM].className =
+ "org/apache/hadoop/hdfs/client/HdfsDataInputStream";
+ cachedJavaClasses[JC_DOMAIN_SOCKET].className =
+ "org/apache/hadoop/net/unix/DomainSocket";
+ cachedJavaClasses[JC_URI].className =
+ "java/net/URI";
+ cachedJavaClasses[JC_BYTE_BUFFER].className =
+ "java/nio/ByteBuffer";
+ cachedJavaClasses[JC_ENUM_SET].className =
+ "java/util/EnumSet";
+ cachedJavaClasses[JC_EXCEPTION_UTILS].className =
+ "org/apache/commons/lang3/exception/ExceptionUtils";
+
+ // Create and set the jclass objects based on the class names set above
+ jthrowable jthr;
+ int numCachedClasses =
+ sizeof(cachedJavaClasses) / sizeof(javaClassAndName);
+ for (int i = 0; i < numCachedClasses; i++) {
+ jthr = initCachedClass(env, cachedJavaClasses[i].className,
+ &cachedJavaClasses[i].javaClass);
+ if (jthr) {
+ mutexUnlock(&jclassInitMutex);
+ return jthr;
+ }
+ }
+ jclassesInitialized = 1;
+ }
+ mutexUnlock(&jclassInitMutex);
+ return NULL;
+}
+
+jclass getJclass(CachedJavaClass cachedJavaClass) {
+ return cachedJavaClasses[cachedJavaClass].javaClass;
+}
+
+const char *getClassName(CachedJavaClass cachedJavaClass) {
+ return cachedJavaClasses[cachedJavaClass].className;
+}
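Once initCachedClasses has populated the array above (normally triggered from getJNIEnv), the two accessors are plain array reads with no JNI round trip. A tiny sketch; describeCachedClass is a hypothetical helper:

#include <stdio.h>
#include <jni.h>
#include "jclasses.h"

/* Hypothetical helper: both accessors are simple lookups, no JNI calls. */
static void describeCachedClass(void)
{
    jclass cls = getJclass(JC_FILE_SYSTEM);
    const char *name = getClassName(JC_FILE_SYSTEM);
    printf("cached %s -> %p\n", name, (void *) cls);
}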
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jclasses.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jclasses.h
new file mode 100644
index 0000000000000..92cdd542e2371
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jclasses.h
@@ -0,0 +1,112 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LIBHDFS_JCLASSES_H
+#define LIBHDFS_JCLASSES_H
+
+#include <jni.h>
+
+/**
+ * Encapsulates logic to cache jclass objects so they can be re-used across
+ * calls to FindClass. Creating jclass objects every time libhdfs has to
+ * invoke a method can hurt performance. By caching jclass objects we avoid
+ * this overhead.
+ *
+ * We use the term "cached" here loosely; jclasses are not truly cached.
+ * Instead, they are created once during JVM load and are kept alive until
+ * the process shuts down. There is no eviction of jclass objects.
+ *
+ * @see https://www.ibm.com/developerworks/library/j-jni/index.html#notc
+ */
+
+/**
+ * Each enum value represents one jclass that is cached. Enum values should
+ * be passed to getJclass or getName to get the jclass object or class name
+ * represented by the enum value.
+ */
+typedef enum {
+ JC_CONFIGURATION,
+ JC_PATH,
+ JC_FILE_SYSTEM,
+ JC_FS_STATUS,
+ JC_FILE_UTIL,
+ JC_BLOCK_LOCATION,
+ JC_DFS_HEDGED_READ_METRICS,
+ JC_DISTRIBUTED_FILE_SYSTEM,
+ JC_FS_DATA_INPUT_STREAM,
+ JC_FS_DATA_OUTPUT_STREAM,
+ JC_FILE_STATUS,
+ JC_FS_PERMISSION,
+ JC_READ_STATISTICS,
+ JC_HDFS_DATA_INPUT_STREAM,
+ JC_DOMAIN_SOCKET,
+ JC_URI,
+ JC_BYTE_BUFFER,
+ JC_ENUM_SET,
+ JC_EXCEPTION_UTILS,
+ // A special marker enum that counts the number of cached jclasses
+ NUM_CACHED_CLASSES
+} CachedJavaClass;
+
+/**
+ * Internally initializes all jclass objects listed in the CachedJavaClass
+ * enum. This method is idempotent and thread-safe.
+ */
+jthrowable initCachedClasses(JNIEnv* env);
+
+/**
+ * Return the jclass object represented by the given CachedJavaClass
+ */
+jclass getJclass(CachedJavaClass cachedJavaClass);
+
+/**
+ * Return the class name represented by the given CachedJavaClass
+ */
+const char *getClassName(CachedJavaClass cachedJavaClass);
+
+/* Some frequently used HDFS class names */
+#define HADOOP_CONF "org/apache/hadoop/conf/Configuration"
+#define HADOOP_PATH "org/apache/hadoop/fs/Path"
+#define HADOOP_LOCALFS "org/apache/hadoop/fs/LocalFileSystem"
+#define HADOOP_FS "org/apache/hadoop/fs/FileSystem"
+#define HADOOP_FSSTATUS "org/apache/hadoop/fs/FsStatus"
+#define HADOOP_FILEUTIL "org/apache/hadoop/fs/FileUtil"
+#define HADOOP_BLK_LOC "org/apache/hadoop/fs/BlockLocation"
+#define HADOOP_DFS_HRM "org/apache/hadoop/hdfs/DFSHedgedReadMetrics"
+#define HADOOP_DFS "org/apache/hadoop/hdfs/DistributedFileSystem"
+#define HADOOP_FSDISTRM "org/apache/hadoop/fs/FSDataInputStream"
+#define HADOOP_FSDOSTRM "org/apache/hadoop/fs/FSDataOutputStream"
+#define HADOOP_FILESTAT "org/apache/hadoop/fs/FileStatus"
+#define HADOOP_FSPERM "org/apache/hadoop/fs/permission/FsPermission"
+#define HADOOP_RSTAT "org/apache/hadoop/hdfs/ReadStatistics"
+#define HADOOP_HDISTRM "org/apache/hadoop/hdfs/client/HdfsDataInputStream"
+#define HADOOP_RO "org/apache/hadoop/fs/ReadOption"
+#define HADOOP_DS "org/apache/hadoop/net/unix/DomainSocket"
+
+/* Some frequently used Java class names */
+#define JAVA_NET_ISA "java/net/InetSocketAddress"
+#define JAVA_NET_URI "java/net/URI"
+#define JAVA_BYTEBUFFER "java/nio/ByteBuffer"
+#define JAVA_STRING "java/lang/String"
+#define JAVA_ENUMSET "java/util/EnumSet"
+
+/* Some frequently used third-party class names */
+
+#define EXCEPTION_UTILS "org/apache/commons/lang3/exception/ExceptionUtils"
+
+#endif /*LIBHDFS_JCLASSES_H*/
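The HADOOP_* and JAVA_* name strings are kept because hdfs.c still composes JNI method signatures from them with its JPARAM/JMETHOD helper macros. A self-contained illustration of how those strings expand; the macro definitions are reproduced here only to match how the patch uses them and are not part of this header:

#include <assert.h>
#include <stdio.h>
#include <string.h>

#define HADOOP_PATH    "org/apache/hadoop/fs/Path"
#define JPARAM(X)      "L" X ";"
#define JMETHOD1(X, R) "(" X ")" R

int main(void)
{
    /* FileSystem#exists(Path) -> boolean */
    const char *sig = JMETHOD1(JPARAM(HADOOP_PATH), "Z");
    assert(strcmp(sig, "(Lorg/apache/hadoop/fs/Path;)Z") == 0);
    printf("%s\n", sig);
    return 0;
}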
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jni_helper.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jni_helper.c
index 91a3c1cafc8f4..ccc1e3f6b8f0b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jni_helper.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jni_helper.c
@@ -18,9 +18,9 @@
#include "config.h"
#include "exception.h"
+#include "jclasses.h"
#include "jni_helper.h"
#include "platform.h"
-#include "common/htable.h"
#include "os/mutexes.h"
#include "os/thread_local_storage.h"
@@ -29,8 +29,6 @@
#include
#include
-static struct htable *gClassRefHTable = NULL;
-
/** The Native return types that methods could return */
#define JVOID 'V'
#define JOBJECT 'L'
@@ -44,13 +42,6 @@ static struct htable *gClassRefHTable = NULL;
#define JFLOAT 'F'
#define JDOUBLE 'D'
-
-/**
- * MAX_HASH_TABLE_ELEM: The maximum no. of entries in the hashtable.
- * It's set to 4096 to account for (classNames + No. of threads)
- */
-#define MAX_HASH_TABLE_ELEM 4096
-
/**
* Length of buffer for retrieving created JVMs. (We only ever create one.)
*/
@@ -108,32 +99,27 @@ jthrowable newCStr(JNIEnv *env, jstring jstr, char **out)
return NULL;
}
-jthrowable invokeMethod(JNIEnv *env, jvalue *retval, MethType methType,
- jobject instObj, const char *className,
- const char *methName, const char *methSignature, ...)
+/**
+ * Does the work to actually execute a Java method. Takes in an existing jclass
+ * object and a va_list of arguments for the Java method to be invoked.
+ */
+static jthrowable invokeMethodOnJclass(JNIEnv *env, jvalue *retval,
+ MethType methType, jobject instObj, jclass cls, const char *className,
+ const char *methName, const char *methSignature, va_list args)
{
- va_list args;
- jclass cls;
jmethodID mid;
jthrowable jthr;
- const char *str;
+ const char *str;
char returnType;
-
- jthr = validateMethodType(env, methType);
- if (jthr)
- return jthr;
- jthr = globalClassReference(className, env, &cls);
- if (jthr)
- return jthr;
- jthr = methodIdFromClass(className, methName, methSignature,
- methType, env, &mid);
+
+ jthr = methodIdFromClass(cls, className, methName, methSignature, methType,
+ env, &mid);
if (jthr)
return jthr;
str = methSignature;
while (*str != ')') str++;
str++;
returnType = *str;
- va_start(args, methSignature);
if (returnType == JOBJECT || returnType == JARRAYOBJECT) {
jobject jobj = NULL;
if (methType == STATIC) {
@@ -192,7 +178,6 @@ jthrowable invokeMethod(JNIEnv *env, jvalue *retval, MethType methType,
}
retval->i = ji;
}
- va_end(args);
jthr = (*env)->ExceptionOccurred(env);
if (jthr) {
@@ -202,43 +187,115 @@ jthrowable invokeMethod(JNIEnv *env, jvalue *retval, MethType methType,
return NULL;
}
-jthrowable constructNewObjectOfClass(JNIEnv *env, jobject *out, const char *className,
- const char *ctorSignature, ...)
+jthrowable findClassAndInvokeMethod(JNIEnv *env, jvalue *retval,
+ MethType methType, jobject instObj, const char *className,
+ const char *methName, const char *methSignature, ...)
{
+ jclass cls = NULL;
+ jthrowable jthr = NULL;
+
va_list args;
- jclass cls;
- jmethodID mid;
+ va_start(args, methSignature);
+
+ jthr = validateMethodType(env, methType);
+ if (jthr) {
+ goto done;
+ }
+
+ cls = (*env)->FindClass(env, className);
+ if (!cls) {
+ jthr = getPendingExceptionAndClear(env);
+ goto done;
+ }
+
+ jthr = invokeMethodOnJclass(env, retval, methType, instObj, cls,
+ className, methName, methSignature, args);
+
+done:
+ va_end(args);
+ destroyLocalReference(env, cls);
+ return jthr;
+}
+
+jthrowable invokeMethod(JNIEnv *env, jvalue *retval, MethType methType,
+ jobject instObj, CachedJavaClass class,
+ const char *methName, const char *methSignature, ...)
+{
+ jthrowable jthr;
+
+ va_list args;
+ va_start(args, methSignature);
+
+ jthr = invokeMethodOnJclass(env, retval, methType, instObj,
+ getJclass(class), getClassName(class), methName, methSignature,
+ args);
+
+ va_end(args);
+ return jthr;
+}
+
+static jthrowable constructNewObjectOfJclass(JNIEnv *env,
+ jobject *out, jclass cls, const char *className,
+ const char *ctorSignature, va_list args) {
+ jmethodID mid;
jobject jobj;
jthrowable jthr;
- jthr = globalClassReference(className, env, &cls);
+ jthr = methodIdFromClass(cls, className, "", ctorSignature, INSTANCE,
+ env, &mid);
if (jthr)
return jthr;
- jthr = methodIdFromClass(className, "", ctorSignature,
- INSTANCE, env, &mid);
- if (jthr)
- return jthr;
- va_start(args, ctorSignature);
jobj = (*env)->NewObjectV(env, cls, mid, args);
- va_end(args);
if (!jobj)
return getPendingExceptionAndClear(env);
*out = jobj;
return NULL;
}
-
-jthrowable methodIdFromClass(const char *className, const char *methName,
- const char *methSignature, MethType methType,
- JNIEnv *env, jmethodID *out)
+jthrowable constructNewObjectOfClass(JNIEnv *env, jobject *out,
+ const char *className, const char *ctorSignature, ...)
{
+ va_list args;
jclass cls;
+ jthrowable jthr = NULL;
+
+ cls = (*env)->FindClass(env, className);
+ if (!cls) {
+ jthr = getPendingExceptionAndClear(env);
+ goto done;
+ }
+
+ va_start(args, ctorSignature);
+ jthr = constructNewObjectOfJclass(env, out, cls, className,
+ ctorSignature, args);
+ va_end(args);
+done:
+ destroyLocalReference(env, cls);
+ return jthr;
+}
+
+jthrowable constructNewObjectOfCachedClass(JNIEnv *env, jobject *out,
+ CachedJavaClass cachedJavaClass, const char *ctorSignature, ...)
+{
+ jthrowable jthr = NULL;
+ va_list args;
+ va_start(args, ctorSignature);
+
+ jthr = constructNewObjectOfJclass(env, out,
+ getJclass(cachedJavaClass), getClassName(cachedJavaClass),
+ ctorSignature, args);
+
+ va_end(args);
+ return jthr;
+}
+
+jthrowable methodIdFromClass(jclass cls, const char *className,
+ const char *methName, const char *methSignature, MethType methType,
+ JNIEnv *env, jmethodID *out)
+{
jthrowable jthr;
jmethodID mid = 0;
- jthr = globalClassReference(className, env, &cls);
- if (jthr)
- return jthr;
jthr = validateMethodType(env, methType);
if (jthr)
return jthr;
@@ -257,54 +314,6 @@ jthrowable methodIdFromClass(const char *className, const char *methName,
return NULL;
}
-jthrowable globalClassReference(const char *className, JNIEnv *env, jclass *out)
-{
- jthrowable jthr = NULL;
- jclass local_clazz = NULL;
- jclass clazz = NULL;
- int ret;
-
- mutexLock(&hdfsHashMutex);
- if (!gClassRefHTable) {
- gClassRefHTable = htable_alloc(MAX_HASH_TABLE_ELEM, ht_hash_string,
- ht_compare_string);
- if (!gClassRefHTable) {
- jthr = newRuntimeError(env, "htable_alloc failed\n");
- goto done;
- }
- }
- clazz = htable_get(gClassRefHTable, className);
- if (clazz) {
- *out = clazz;
- goto done;
- }
- local_clazz = (*env)->FindClass(env,className);
- if (!local_clazz) {
- jthr = getPendingExceptionAndClear(env);
- goto done;
- }
- clazz = (*env)->NewGlobalRef(env, local_clazz);
- if (!clazz) {
- jthr = getPendingExceptionAndClear(env);
- goto done;
- }
- ret = htable_put(gClassRefHTable, (void*)className, clazz);
- if (ret) {
- jthr = newRuntimeError(env, "htable_put failed with error "
- "code %d\n", ret);
- goto done;
- }
- *out = clazz;
- jthr = NULL;
-done:
- mutexUnlock(&hdfsHashMutex);
- (*env)->DeleteLocalRef(env, local_clazz);
- if (jthr && clazz) {
- (*env)->DeleteGlobalRef(env, clazz);
- }
- return jthr;
-}
-
jthrowable classNameOfObject(jobject jobj, JNIEnv *env, char **name)
{
jthrowable jthr;
@@ -358,7 +367,6 @@ jthrowable classNameOfObject(jobject jobj, JNIEnv *env, char **name)
return jthr;
}
-
/**
* For the given path, expand it by filling in with all *.jar or *.JAR files,
* separated by PATH_SEPARATOR. Assumes that expanded is big enough to hold the
@@ -731,14 +739,17 @@ static JNIEnv* getGlobalJNIEnv(void)
"with error: %d\n", rv);
return NULL;
}
- jthr = invokeMethod(env, NULL, STATIC, NULL,
- "org/apache/hadoop/fs/FileSystem",
- "loadFileSystems", "()V");
+
+ // We use findClassAndInvokeMethod here because the jclasses in
+ // jclasses.h have not been loaded yet
+ jthr = findClassAndInvokeMethod(env, NULL, STATIC, NULL, HADOOP_FS,
+ "loadFileSystems", "()V");
if (jthr) {
- printExceptionAndFree(env, jthr, PRINT_EXC_ALL, "loadFileSystems");
+ printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+ "FileSystem: loadFileSystems failed");
+ return NULL;
}
- }
- else {
+ } else {
//Attach this thread to the VM
vm = vmBuf[0];
rv = (*vm)->AttachCurrentThread(vm, (void*)&env, 0);
@@ -809,6 +820,15 @@ JNIEnv* getJNIEnv(void)
state->env = getGlobalJNIEnv();
mutexUnlock(&jvmMutex);
+
+ jthrowable jthr = NULL;
+ jthr = initCachedClasses(state->env);
+ if (jthr) {
+ printExceptionAndFree(state->env, jthr, PRINT_EXC_ALL,
+ "initCachedClasses failed");
+ goto fail;
+ }
+
if (!state->env) {
goto fail;
}
@@ -898,8 +918,7 @@ jthrowable hadoopConfSetStr(JNIEnv *env, jobject jConfiguration,
if (jthr)
goto done;
jthr = invokeMethod(env, NULL, INSTANCE, jConfiguration,
- "org/apache/hadoop/conf/Configuration", "set",
- "(Ljava/lang/String;Ljava/lang/String;)V",
+ JC_CONFIGURATION, "set", "(Ljava/lang/String;Ljava/lang/String;)V",
jkey, jvalue);
if (jthr)
goto done;
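After this split there are two entry points: invokeMethod for the normal case where the class is cached, and findClassAndInvokeMethod for bootstrap or test paths that run before initCachedClasses (the loadFileSystems call above is the one production example). A reduced sketch of that bootstrap call; loadFileSystemsOnce is a hypothetical wrapper:

#include <jni.h>
#include "exception.h"
#include "jclasses.h"
#include "jni_helper.h"

/* Hypothetical wrapper around the bootstrap call shown above. */
static void loadFileSystemsOnce(JNIEnv *env)
{
    jthrowable jthr;

    /* The jclass cache is not initialized yet, so resolve by name. */
    jthr = findClassAndInvokeMethod(env, NULL, STATIC, NULL, HADOOP_FS,
            "loadFileSystems", "()V");
    if (jthr) {
        printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
                "FileSystem: loadFileSystems failed");
    }
}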
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jni_helper.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jni_helper.h
index f0d06d72fc040..41d6fab2a75ae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jni_helper.h
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jni_helper.h
@@ -19,6 +19,8 @@
#ifndef LIBHDFS_JNI_HELPER_H
#define LIBHDFS_JNI_HELPER_H
+#include "jclasses.h"
+
#include
#include
@@ -36,7 +38,6 @@
// #define _LIBHDFS_JNI_HELPER_DEBUGGING_ON_
-
/** Denote the method we want to invoke as STATIC or INSTANCE */
typedef enum {
STATIC,
@@ -74,12 +75,12 @@ jthrowable newJavaStr(JNIEnv *env, const char *str, jstring *out);
void destroyLocalReference(JNIEnv *env, jobject jObject);
/** invokeMethod: Invoke a Static or Instance method.
- * className: Name of the class where the method can be found
* methName: Name of the method
* methSignature: the signature of the method "(arg-types)ret-type"
* methType: The type of the method (STATIC or INSTANCE)
* instObj: Required if the methType is INSTANCE. The object to invoke
the method on.
+ * class: The CachedJavaClass to call the method on.
* env: The JNIEnv pointer
* retval: The pointer to a union type which will contain the result of the
method invocation, e.g. if the method returns an Object, retval will be
@@ -91,17 +92,33 @@ void destroyLocalReference(JNIEnv *env, jobject jObject);
a valid exception reference, and the result stored at retval is undefined.
*/
jthrowable invokeMethod(JNIEnv *env, jvalue *retval, MethType methType,
- jobject instObj, const char *className, const char *methName,
- const char *methSignature, ...);
+ jobject instObj, CachedJavaClass class,
+ const char *methName, const char *methSignature, ...);
-jthrowable constructNewObjectOfClass(JNIEnv *env, jobject *out, const char *className,
- const char *ctorSignature, ...);
+/**
+ * findClassAndInvokeMethod: Same as invokeMethod, but it calls FindClass on
+ * the given className first and then invokes the method. This method exists
+ * mainly for test infrastructure; production code should use invokeMethod.
+ * Calling FindClass repeatedly can introduce performance overhead, so users
+ * should prefer invokeMethod and supply a CachedJavaClass.
+ */
+jthrowable findClassAndInvokeMethod(JNIEnv *env, jvalue *retval,
+ MethType methType, jobject instObj, const char *className,
+ const char *methName, const char *methSignature, ...);
-jthrowable methodIdFromClass(const char *className, const char *methName,
- const char *methSignature, MethType methType,
- JNIEnv *env, jmethodID *out);
+jthrowable constructNewObjectOfClass(JNIEnv *env, jobject *out,
+ const char *className, const char *ctorSignature, ...);
+
+/**
+ * Same as constructNewObjectOfClass but it takes in a CachedJavaClass
+ * rather than a className. This avoids an extra call to FindClass.
+ */
+jthrowable constructNewObjectOfCachedClass(JNIEnv *env, jobject *out,
+ CachedJavaClass cachedJavaClass, const char *ctorSignature, ...);
-jthrowable globalClassReference(const char *className, JNIEnv *env, jclass *out);
+jthrowable methodIdFromClass(jclass cls, const char *className,
+ const char *methName, const char *methSignature, MethType methType,
+ JNIEnv *env, jmethodID *out);
/** classNameOfObject: Get an object's class name.
* @param jobj: The object.
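The constructor helpers follow the same split as invokeMethod: constructNewObjectOfClass still resolves the class by name, while constructNewObjectOfCachedClass declared above takes a CachedJavaClass. A reduced sketch of the cached variant, mirroring the Configuration call site in hdfs.c (newConfiguration is a hypothetical wrapper; error reporting trimmed):

#include <jni.h>
#include "jclasses.h"
#include "jni_helper.h"

/* Hypothetical wrapper: build a new org.apache.hadoop.conf.Configuration. */
static jobject newConfiguration(JNIEnv *env)
{
    jobject jConfiguration = NULL;
    jthrowable jthr;

    /* No class-name string and no FindClass; the cached jclass is used. */
    jthr = constructNewObjectOfCachedClass(env, &jConfiguration,
            JC_CONFIGURATION, "()V");
    return jthr ? NULL : jConfiguration;
}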
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/mutexes.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/mutexes.h
index da30bf4974f77..92afabd7c75c6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/mutexes.h
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/mutexes.h
@@ -30,12 +30,12 @@
#include "platform.h"
-/** Mutex protecting the class reference hash table. */
-extern mutex hdfsHashMutex;
-
/** Mutex protecting singleton JVM instance. */
extern mutex jvmMutex;
+/** Mutex protecting initialization of jclasses in jclasses.h. */
+extern mutex jclassInitMutex;
+
/**
* Locks a mutex.
*
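jclassInitMutex, declared above, exists solely so that initCachedClasses can be a thread-safe, idempotent one-time initializer. The underlying pattern, shown with plain pthreads rather than the mutex wrappers from this header (illustrative sketch only):

#include <pthread.h>

static pthread_mutex_t initMutex = PTHREAD_MUTEX_INITIALIZER;
static int initialized = 0;

static void doExpensiveInit(void)
{
    /* e.g. FindClass + NewGlobalRef for every class we want to cache */
}

void initOnce(void)
{
    pthread_mutex_lock(&initMutex);
    if (!initialized) {
        doExpensiveInit();
        initialized = 1; /* flip the flag only after the work succeeded */
    }
    pthread_mutex_unlock(&initMutex);
}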
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/posix/mutexes.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/posix/mutexes.c
index 20dafaa020b99..5c6b429d5ec03 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/posix/mutexes.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/posix/mutexes.c
@@ -21,8 +21,8 @@
#include
#include
-mutex hdfsHashMutex = PTHREAD_MUTEX_INITIALIZER;
mutex jvmMutex;
+mutex jclassInitMutex = PTHREAD_MUTEX_INITIALIZER;
pthread_mutexattr_t jvmMutexAttr;
__attribute__((constructor)) static void init() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/posix/thread_local_storage.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/posix/thread_local_storage.c
index 110c71a855853..a0f26c6cb6e71 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/posix/thread_local_storage.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/posix/thread_local_storage.c
@@ -23,12 +23,20 @@
#include
#include
+#include "exception.h"
+#include "jni_helper.h"
+
+#define UNKNOWN "UNKNOWN"
+#define MAXTHRID 256
+
/** Key that allows us to retrieve thread-local storage */
static pthread_key_t gTlsKey;
/** nonzero if we succeeded in initializing gTlsKey. Protected by the jvmMutex */
static int gTlsKeyInitialized = 0;
+static void get_current_thread_id(JNIEnv* env, char* id, int max);
+
/**
* The function that is called whenever a thread with libhdfs thread local data
* is destroyed.
@@ -41,16 +49,35 @@ void hdfsThreadDestructor(void *v)
struct ThreadLocalState *state = (struct ThreadLocalState*)v;
JNIEnv *env = state->env;;
jint ret;
+ jthrowable jthr;
+ char thr_name[MAXTHRID];
/* Detach the current thread from the JVM */
- if ((env != NULL) && (*env != NULL)) {
+ if (env) {
ret = (*env)->GetJavaVM(env, &vm);
- if (ret) {
+
+ if (ret != 0) {
fprintf(stderr, "hdfsThreadDestructor: GetJavaVM failed with error %d\n",
ret);
- (*env)->ExceptionDescribe(env);
+ jthr = (*env)->ExceptionOccurred(env);
+ if (jthr) {
+ (*env)->ExceptionDescribe(env);
+ (*env)->ExceptionClear(env);
+ }
} else {
- (*vm)->DetachCurrentThread(vm);
+ ret = (*vm)->DetachCurrentThread(vm);
+
+ if (ret != JNI_OK) {
+ jthr = (*env)->ExceptionOccurred(env);
+ if (jthr) {
+ (*env)->ExceptionDescribe(env);
+ (*env)->ExceptionClear(env);
+ }
+ get_current_thread_id(env, thr_name, MAXTHRID);
+
+ fprintf(stderr, "hdfsThreadDestructor: Unable to detach thread %s "
+ "from the JVM. Error code: %d\n", thr_name, ret);
+ }
}
}
@@ -62,13 +89,73 @@ void hdfsThreadDestructor(void *v)
free(state);
}
+static void get_current_thread_id(JNIEnv* env, char* id, int max) {
+ jvalue jVal;
+ jobject thr = NULL;
+ jstring thr_name = NULL;
+ jlong thr_id = 0;
+ jthrowable jthr = NULL;
+ const char *thr_name_str;
+
+ jthr = findClassAndInvokeMethod(env, &jVal, STATIC, NULL, "java/lang/Thread",
+ "currentThread", "()Ljava/lang/Thread;");
+ if (jthr) {
+ snprintf(id, max, "%s", UNKNOWN);
+ printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+ "get_current_thread_id: Thread#currentThread failed: ");
+ goto done;
+ }
+ thr = jVal.l;
+
+ jthr = findClassAndInvokeMethod(env, &jVal, INSTANCE, thr,
+ "java/lang/Thread", "getId", "()J");
+ if (jthr) {
+ snprintf(id, max, "%s", UNKNOWN);
+ printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+ "get_current_thread_id: Thread#getId failed: ");
+ goto done;
+ }
+ thr_id = jVal.j;
+
+ jthr = findClassAndInvokeMethod(env, &jVal, INSTANCE, thr,
+ "java/lang/Thread", "toString", "()Ljava/lang/String;");
+ if (jthr) {
+ snprintf(id, max, "%s:%ld", UNKNOWN, thr_id);
+ printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+ "get_current_thread_id: Thread#toString failed: ");
+ goto done;
+ }
+ thr_name = jVal.l;
+
+ thr_name_str = (*env)->GetStringUTFChars(env, thr_name, NULL);
+ if (!thr_name_str) {
+ printPendingExceptionAndFree(env, PRINT_EXC_ALL,
+ "get_current_thread_id: GetStringUTFChars failed: ");
+ snprintf(id, max, "%s:%ld", UNKNOWN, thr_id);
+ goto done;
+ }
+
+ // Treating the jlong as a long *should* be safe
+ snprintf(id, max, "%s:%ld", thr_name_str, thr_id);
+
+ // Release the char*
+ (*env)->ReleaseStringUTFChars(env, thr_name, thr_name_str);
+
+done:
+ destroyLocalReference(env, thr);
+ destroyLocalReference(env, thr_name);
+
+ // Make sure the id is null terminated in case we overflow the max length
+ id[max - 1] = '\0';
+}
+
struct ThreadLocalState* threadLocalStorageCreate()
{
struct ThreadLocalState *state;
state = (struct ThreadLocalState*)malloc(sizeof(struct ThreadLocalState));
if (state == NULL) {
fprintf(stderr,
- "threadLocalStorageSet: OOM - Unable to allocate thread local state\n");
+ "threadLocalStorageCreate: OOM - Unable to allocate thread local state\n");
return NULL;
}
state->lastExceptionStackTrace = NULL;
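The new get_current_thread_id only improves the detach failure message; the detach itself still happens in hdfsThreadDestructor, which runs because it is registered as the destructor of the thread-local key. The underlying pthread mechanism, as a self-contained sketch that is independent of the libhdfs sources:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_key_t key;

/* Runs automatically when a thread that set a value under 'key' exits. */
static void destructor(void *v)
{
    fprintf(stderr, "thread exiting, releasing %p\n", v);
    free(v);
}

static void *worker(void *arg)
{
    (void) arg;
    pthread_setspecific(key, malloc(16)); /* value handed to the destructor */
    return NULL;                          /* destructor fires after return */
}

int main(void)
{
    pthread_t t;
    pthread_key_create(&key, destructor);
    pthread_create(&t, NULL, worker, NULL);
    pthread_join(t, NULL);
    return 0;
}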
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/windows/mutexes.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/windows/mutexes.c
index 875f03386a817..ac7f9fd35b8af 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/windows/mutexes.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/windows/mutexes.c
@@ -20,8 +20,8 @@
#include
-mutex hdfsHashMutex;
mutex jvmMutex;
+mutex jclassInitMutex;
/**
* Unfortunately, there is no simple static initializer for a critical section.
@@ -34,8 +34,8 @@ mutex jvmMutex;
* http://msdn.microsoft.com/en-us/library/bb918180.aspx
*/
static void __cdecl initializeMutexes(void) {
- InitializeCriticalSection(&hdfsHashMutex);
InitializeCriticalSection(&jvmMutex);
+ InitializeCriticalSection(&jclassInitMutex);
}
#pragma section(".CRT$XCU", read)
__declspec(allocate(".CRT$XCU"))
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMakeLists.txt
index 2da5b6bbe52e3..f64eec10a8b98 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMakeLists.txt
@@ -48,6 +48,7 @@ find_package(GSasl)
find_package(Threads)
include(CheckCXXSourceCompiles)
+include(CheckSymbolExists)
# Check if thread_local is supported
unset (THREAD_LOCAL_SUPPORTED CACHE)
@@ -141,6 +142,11 @@ else (NOT NO_SASL)
message(STATUS "Compiling with NO SASL SUPPORT")
endif (NOT NO_SASL)
+check_symbol_exists(explicit_bzero "string.h" HAVE_EXPLICIT_BZERO)
+if(HAVE_EXPLICIT_BZERO)
+ add_definitions(-DHAVE_EXPLICIT_BZERO)
+endif()
+
add_definitions(-DASIO_STANDALONE -DASIO_CPP11_DATE_TIME)
# Disable optimizations if compiling debug
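The new CheckSymbolExists probe defines HAVE_EXPLICIT_BZERO whenever string.h provides explicit_bzero, and the source changes below switch on that macro at each zeroing site. A small sketch of how the same fallback could be centralized in one helper, assuming only the macro set up here; the hdfs_secure_bzero name is hypothetical and not part of this patch.

    #include <string.h>    /* explicit_bzero() on glibc/BSD */
    #include <strings.h>   /* bzero() fallback */

    static inline void hdfs_secure_bzero(void *buf, size_t len)
    {
    #ifdef HAVE_EXPLICIT_BZERO
        /* explicit_bzero() is guaranteed not to be optimized away */
        explicit_bzero(buf, len);
    #else
        bzero(buf, len);
    #endif
    }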
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/hdfs.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/hdfs.cc
index 6b2468fd5dbdc..549da93c2aa89 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/hdfs.cc
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/hdfs.cc
@@ -1402,7 +1402,11 @@ int hdfsGetBlockLocations(hdfsFS fs, const char *path, struct hdfsBlockLocations
hdfsBlockLocations *locations = new struct hdfsBlockLocations();
(*locations_out) = locations;
+#ifdef HAVE_EXPLICIT_BZERO
+ explicit_bzero(locations, sizeof(*locations));
+#else
bzero(locations, sizeof(*locations));
+#endif
locations->fileLength = ppLocations->getFileLength();
locations->isLastBlockComplete = ppLocations->isLastBlockComplete();
locations->isUnderConstruction = ppLocations->isUnderConstruction();
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_ext_test.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_ext_test.cc
index f364d0e15a978..29255ef282882 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_ext_test.cc
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_ext_test.cc
@@ -475,7 +475,11 @@ TEST_F(HdfsExtTest, TestReadStats) {
hdfsFile file = hdfsOpenFile(fs, path.c_str(), O_WRONLY, 0, 0, 0);
EXPECT_NE(nullptr, file);
void * buf = malloc(size);
+#ifdef HAVE_EXPLICIT_BZERO
+ explicit_bzero(buf, size);
+#else
bzero(buf, size);
+#endif
EXPECT_EQ(size, hdfsWrite(fs, file, buf, size));
free(buf);
EXPECT_EQ(0, hdfsCloseFile(fs, file));
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_shim.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_shim.c
index 54d4cf651eb9e..bda27b9a43202 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_shim.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_shim.c
@@ -317,6 +317,12 @@ tSize hdfsPread(hdfsFS fs, hdfsFile file, tOffset position,
return ret;
}
+int hdfsPreadFully(hdfsFS fs, hdfsFile file, tOffset position,
+ void* buffer, tSize length) {
+ return libhdfs_hdfsPreadFully(fs->libhdfsRep, file->libhdfsRep, position,
+ buffer, length);
+}
+
tSize hdfsWrite(hdfsFS fs, hdfsFile file, const void* buffer,
tSize length) {
return libhdfs_hdfsWrite(fs->libhdfsRep, file->libhdfsRep, buffer, length);
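The shim forwards hdfsPreadFully() straight to the libhdfs implementation, mirroring hdfsPread() above. A hedged usage sketch from a client's point of view follows; it assumes the usual *Fully convention of returning 0 on success and -1 on error (check hdfs.h for the authoritative contract), and the read_block_at() helper is illustrative only.

    #include <fcntl.h>
    #include <stdlib.h>
    #include "hdfs/hdfs.h"

    /* Read exactly len bytes starting at offset into a private buffer. */
    int read_block_at(const char *path, tOffset offset, tSize len)
    {
        hdfsFS fs = hdfsConnect("default", 0);
        if (!fs) return -1;
        hdfsFile file = hdfsOpenFile(fs, path, O_RDONLY, 0, 0, 0);
        if (!file) { hdfsDisconnect(fs); return -1; }

        void *buf = malloc(len);
        int ret = buf ? hdfsPreadFully(fs, file, offset, buf, len) : -1;
        /* on success, buf now holds exactly len bytes from the file */

        free(buf);
        hdfsCloseFile(fs, file);
        hdfsDisconnect(fs);
        return ret;
    }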
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfspp_mini_dfs.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfspp_mini_dfs.h
index aecced1a8b6e5..320a958b10c0b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfspp_mini_dfs.h
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfspp_mini_dfs.h
@@ -92,7 +92,11 @@ class HdfsHandle {
hdfsFile file = hdfsOpenFile(*this, path.c_str(), O_WRONLY, 0, 0, 0);
EXPECT_NE(nullptr, file);
void * buf = malloc(size);
+#ifdef HAVE_EXPLICIT_BZERO
+ explicit_bzero(buf, size);
+#else
bzero(buf, size);
+#endif
EXPECT_EQ(1024, hdfsWrite(*this, file, buf, size));
EXPECT_EQ(0, hdfsCloseFile(*this, file));
free(buf);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_wrapper_defines.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_wrapper_defines.h
index b90776893f6b8..0d014341b4c57 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_wrapper_defines.h
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_wrapper_defines.h
@@ -47,6 +47,7 @@
#define hdfsTell libhdfs_hdfsTell
#define hdfsRead libhdfs_hdfsRead
#define hdfsPread libhdfs_hdfsPread
+#define hdfsPreadFully libhdfs_hdfsPreadFully
#define hdfsWrite libhdfs_hdfsWrite
#define hdfsFlush libhdfs_hdfsFlush
#define hdfsHFlush libhdfs_hdfsHFlush
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_wrapper_undefs.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_wrapper_undefs.h
index fce0e823ddeb8..d46768c02ad39 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_wrapper_undefs.h
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_wrapper_undefs.h
@@ -47,6 +47,7 @@
#undef hdfsTell
#undef hdfsRead
#undef hdfsPread
+#undef hdfsPreadFully
#undef hdfsWrite
#undef hdfsFlush
#undef hdfsHFlush
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfspp_wrapper_defines.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfspp_wrapper_defines.h
index d0411c2126c88..4b08d0556c3aa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfspp_wrapper_defines.h
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfspp_wrapper_defines.h
@@ -47,6 +47,7 @@
#define hdfsTell libhdfspp_hdfsTell
#define hdfsRead libhdfspp_hdfsRead
#define hdfsPread libhdfspp_hdfsPread
+#define hdfsPreadFully libhdfspp_hdfsPreadFully
#define hdfsWrite libhdfspp_hdfsWrite
#define hdfsFlush libhdfspp_hdfsFlush
#define hdfsHFlush libhdfspp_hdfsHFlush
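These wrapper headers exist so the same test sources can be built once against libhdfs and once against libhdfs++: a *_defines.h header is included before the public header to rename each symbol with a library-specific prefix, and the matching undefs header restores the names afterwards, which is why hdfsPreadFully must be added to all three files. A hedged illustration of that preprocessor pattern, reduced to the single new symbol:

    /* before including the public header, rename the symbol ... */
    #define hdfsPreadFully libhdfs_hdfsPreadFully
    #include "hdfs/hdfs.h"   /* now declares libhdfs_hdfsPreadFully(...) */
    #undef hdfsPreadFully    /* ... then restore the name, so the shim can
                                define the unprefixed wrapper itself */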
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteBufferPread.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteBufferPread.java
new file mode 100644
index 0000000000000..4547db1c98ecb
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteBufferPread.java
@@ -0,0 +1,269 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import java.util.Random;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * This class tests the DFS positional read functionality on a single node
+ * mini-cluster. These tests are inspired by {@link TestPread}. The tests
+ * are much less comprehensive than other pread tests because pread already
+ * internally uses {@link ByteBuffer}s.
+ */
+public class TestByteBufferPread {
+
+ private static MiniDFSCluster cluster;
+ private static FileSystem fs;
+ private static byte[] fileContents;
+ private static Path testFile;
+ private static Random rand;
+
+ private static final long SEED = 0xDEADBEEFL;
+ private static final int BLOCK_SIZE = 4096;
+ private static final int FILE_SIZE = 12 * BLOCK_SIZE;
+
+ @BeforeClass
+ public static void setup() throws IOException {
+ // Setup the cluster with a small block size so we can create small files
+ // that span multiple blocks
+ Configuration conf = new Configuration();
+ conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+ fs = cluster.getFileSystem();
+
+ // Create a test file that spans 12 blocks, and contains a bunch of random
+ // bytes
+ fileContents = new byte[FILE_SIZE];
+ rand = new Random(SEED);
+ rand.nextBytes(fileContents);
+ testFile = new Path("/byte-buffer-pread-test.dat");
+ try (FSDataOutputStream out = fs.create(testFile, (short) 3)) {
+ out.write(fileContents);
+ }
+ }
+
+ /**
+ * Test preads with {@link java.nio.HeapByteBuffer}s.
+ */
+ @Test
+ public void testPreadWithHeapByteBuffer() throws IOException {
+ testPreadWithByteBuffer(ByteBuffer.allocate(FILE_SIZE));
+ testPreadWithFullByteBuffer(ByteBuffer.allocate(FILE_SIZE));
+ testPreadWithPositionedByteBuffer(ByteBuffer.allocate(FILE_SIZE));
+ testPreadWithLimitedByteBuffer(ByteBuffer.allocate(FILE_SIZE));
+ testPositionedPreadWithByteBuffer(ByteBuffer.allocate(FILE_SIZE));
+ }
+
+ /**
+ * Test preads with {@link java.nio.DirectByteBuffer}s.
+ */
+ @Test
+ public void testPreadWithDirectByteBuffer() throws IOException {
+ testPreadWithByteBuffer(ByteBuffer.allocateDirect(FILE_SIZE));
+ testPreadWithFullByteBuffer(ByteBuffer.allocateDirect(FILE_SIZE));
+ testPreadWithPositionedByteBuffer(ByteBuffer.allocateDirect(FILE_SIZE));
+ testPreadWithLimitedByteBuffer(ByteBuffer.allocateDirect(FILE_SIZE));
+ testPositionedPreadWithByteBuffer(ByteBuffer.allocateDirect(FILE_SIZE));
+ }
+
+ /**
+ * Reads the entire testFile using the pread API and validates that its
+ * contents are properly loaded into the supplied {@link ByteBuffer}.
+ */
+ private void testPreadWithByteBuffer(ByteBuffer buffer) throws IOException {
+ int bytesRead;
+ int totalBytesRead = 0;
+ try (FSDataInputStream in = fs.open(testFile)) {
+ while ((bytesRead = in.read(totalBytesRead, buffer)) > 0) {
+ totalBytesRead += bytesRead;
+ // Check that each call to read changes the position of the ByteBuffer
+ // correctly
+ assertEquals(totalBytesRead, buffer.position());
+ }
+
+ // Make sure the buffer is full
+ assertFalse(buffer.hasRemaining());
+ // Make sure the contents of the read buffer equal the contents of the
+ // file
+ buffer.position(0);
+ byte[] bufferContents = new byte[FILE_SIZE];
+ buffer.get(bufferContents);
+ assertArrayEquals(bufferContents, fileContents);
+ buffer.position(buffer.limit());
+ }
+ }
+
+ /**
+ * Attempts to read the testFile into a {@link ByteBuffer} that is already
+ * full, and validates that doing so does not change the contents of the
+ * supplied {@link ByteBuffer}.
+ */
+ private void testPreadWithFullByteBuffer(ByteBuffer buffer)
+ throws IOException {
+ // Load some dummy data into the buffer
+ byte[] existingBufferBytes = new byte[FILE_SIZE];
+ rand.nextBytes(existingBufferBytes);
+ buffer.put(existingBufferBytes);
+ // Make sure the buffer is full
+ assertFalse(buffer.hasRemaining());
+
+ try (FSDataInputStream in = fs.open(testFile)) {
+ // Attempt to read into the buffer, 0 bytes should be read since the
+ // buffer is full
+ assertEquals(0, in.read(buffer));
+
+ // Double check the buffer is still full and its contents have not
+ // changed
+ assertFalse(buffer.hasRemaining());
+ buffer.position(0);
+ byte[] bufferContents = new byte[FILE_SIZE];
+ buffer.get(bufferContents);
+ assertArrayEquals(bufferContents, existingBufferBytes);
+ }
+ }
+
+ /**
+ * Reads half of the testFile into the {@link ByteBuffer} by setting a
+ * {@link ByteBuffer#limit} on the buffer. Validates that only half of the
+ * testFile is loaded into the buffer.
+ */
+ private void testPreadWithLimitedByteBuffer(
+ ByteBuffer buffer) throws IOException {
+ int bytesRead;
+ int totalBytesRead = 0;
+ // Set the buffer limit to half the size of the file
+ buffer.limit(FILE_SIZE / 2);
+
+ try (FSDataInputStream in = fs.open(testFile)) {
+ while ((bytesRead = in.read(totalBytesRead, buffer)) > 0) {
+ totalBytesRead += bytesRead;
+ // Check that each call to read changes the position of the ByteBuffer
+ // correctly
+ assertEquals(totalBytesRead, buffer.position());
+ }
+
+ // Since we set the buffer limit to half the size of the file, we should
+ // have only read half of the file into the buffer
+ assertEquals(totalBytesRead, FILE_SIZE / 2);
+ // Check that the buffer is full and the contents equal the first half of
+ // the file
+ assertFalse(buffer.hasRemaining());
+ buffer.position(0);
+ byte[] bufferContents = new byte[FILE_SIZE / 2];
+ buffer.get(bufferContents);
+ assertArrayEquals(bufferContents,
+ Arrays.copyOfRange(fileContents, 0, FILE_SIZE / 2));
+ }
+ }
+
+ /**
+ * Reads half of the testFile into the {@link ByteBuffer} by setting the
+ * {@link ByteBuffer#position} to half the size of the file. Validates that
+ * only half of the testFile is loaded into the buffer.
+ */
+ private void testPreadWithPositionedByteBuffer(
+ ByteBuffer buffer) throws IOException {
+ int bytesRead;
+ int totalBytesRead = 0;
+ // Set the buffer position to half the size of the file
+ buffer.position(FILE_SIZE / 2);
+
+ try (FSDataInputStream in = fs.open(testFile)) {
+ while ((bytesRead = in.read(totalBytesRead, buffer)) > 0) {
+ totalBytesRead += bytesRead;
+ // Check that each call to read changes the position of the ByteBuffer
+ // correctly
+ assertEquals(totalBytesRead + FILE_SIZE / 2, buffer.position());
+ }
+
+ // Since we set the buffer position to half the size of the file, we
+ // should have only read half of the file into the buffer
+ assertEquals(totalBytesRead, FILE_SIZE / 2);
+ // Check that the buffer is full and the contents equal the first half of
+ // the file
+ assertFalse(buffer.hasRemaining());
+ buffer.position(FILE_SIZE / 2);
+ byte[] bufferContents = new byte[FILE_SIZE / 2];
+ buffer.get(bufferContents);
+ assertArrayEquals(bufferContents,
+ Arrays.copyOfRange(fileContents, 0, FILE_SIZE / 2));
+ }
+ }
+
+ /**
+ * Reads half of the testFile into the {@link ByteBuffer} by specifying a
+ * position for the pread API that is half of the file size. Validates that
+ * only half of the testFile is loaded into the buffer.
+ */
+ private void testPositionedPreadWithByteBuffer(
+ ByteBuffer buffer) throws IOException {
+ int bytesRead;
+ int totalBytesRead = 0;
+
+ try (FSDataInputStream in = fs.open(testFile)) {
+ // Start reading from halfway through the file
+ while ((bytesRead = in.read(totalBytesRead + FILE_SIZE / 2,
+ buffer)) > 0) {
+ totalBytesRead += bytesRead;
+ // Check that each call to read changes the position of the ByteBuffer
+ // correctly
+ assertEquals(totalBytesRead, buffer.position());
+ }
+
+ // Since we started reading halfway through the file, the buffer should
+ // only be half full
+ assertEquals(totalBytesRead, FILE_SIZE / 2);
+ assertEquals(buffer.position(), FILE_SIZE / 2);
+ assertTrue(buffer.hasRemaining());
+ // Check that the buffer contents equal the second half of the file
+ buffer.position(0);
+ byte[] bufferContents = new byte[FILE_SIZE / 2];
+ buffer.get(bufferContents);
+ assertArrayEquals(bufferContents,
+ Arrays.copyOfRange(fileContents, FILE_SIZE / 2, FILE_SIZE));
+ }
+ }
+
+ @AfterClass
+ public static void shutdown() throws IOException {
+ try {
+ fs.delete(testFile, false);
+ fs.close();
+ } finally {
+ cluster.shutdown(true);
+ }
+ }
+}