getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required string methodName = 1;
+ public static final int METHODNAME_FIELD_NUMBER = 1;
+ private java.lang.Object methodName_;
+ /**
+ * required string methodName = 1;
+ *
+ *
+ ** Name of the RPC method
+ *
+ */
+ public boolean hasMethodName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required string methodName = 1;
+ *
+ *
+ ** Name of the RPC method
+ *
+ */
+ public java.lang.String getMethodName() {
+ java.lang.Object ref = methodName_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ methodName_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * required string methodName = 1;
+ *
+ *
+ ** Name of the RPC method
+ *
+ */
+ public com.google.protobuf.ByteString
+ getMethodNameBytes() {
+ java.lang.Object ref = methodName_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ methodName_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // required string declaringClassProtocolName = 2;
+ public static final int DECLARINGCLASSPROTOCOLNAME_FIELD_NUMBER = 2;
+ private java.lang.Object declaringClassProtocolName_;
+ /**
+ * required string declaringClassProtocolName = 2;
+ *
+ *
+ **
+ * RPCs for a particular interface (ie protocol) are done using a
+ * IPC connection that is setup using rpcProxy.
+ * The rpcProxy's has a declared protocol name that is
+ * sent form client to server at connection time.
+ *
+ * Each Rpc call also sends a protocol name
+ * (called declaringClassprotocolName). This name is usually the same
+ * as the connection protocol name except in some cases.
+ * For example metaProtocols such ProtocolInfoProto which get metainfo
+ * about the protocol reuse the connection but need to indicate that
+ * the actual protocol is different (i.e. the protocol is
+ * ProtocolInfoProto) since they reuse the connection; in this case
+ * the declaringClassProtocolName field is set to the ProtocolInfoProto
+ *
+ */
+ public boolean hasDeclaringClassProtocolName() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required string declaringClassProtocolName = 2;
+ *
+ *
+ **
+ * RPCs for a particular interface (ie protocol) are done using a
+ * IPC connection that is setup using rpcProxy.
+ * The rpcProxy's has a declared protocol name that is
+ * sent form client to server at connection time.
+ *
+ * Each Rpc call also sends a protocol name
+ * (called declaringClassprotocolName). This name is usually the same
+ * as the connection protocol name except in some cases.
+ * For example metaProtocols such ProtocolInfoProto which get metainfo
+ * about the protocol reuse the connection but need to indicate that
+ * the actual protocol is different (i.e. the protocol is
+ * ProtocolInfoProto) since they reuse the connection; in this case
+ * the declaringClassProtocolName field is set to the ProtocolInfoProto
+ *
+ */
+ public java.lang.String getDeclaringClassProtocolName() {
+ java.lang.Object ref = declaringClassProtocolName_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ declaringClassProtocolName_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * required string declaringClassProtocolName = 2;
+ *
+ *
+ **
+ * RPCs for a particular interface (ie protocol) are done using a
+ * IPC connection that is setup using rpcProxy.
+ * The rpcProxy's has a declared protocol name that is
+ * sent form client to server at connection time.
+ *
+ * Each Rpc call also sends a protocol name
+ * (called declaringClassprotocolName). This name is usually the same
+ * as the connection protocol name except in some cases.
+ * For example metaProtocols such ProtocolInfoProto which get metainfo
+ * about the protocol reuse the connection but need to indicate that
+ * the actual protocol is different (i.e. the protocol is
+ * ProtocolInfoProto) since they reuse the connection; in this case
+ * the declaringClassProtocolName field is set to the ProtocolInfoProto
+ *
+ */
+ public com.google.protobuf.ByteString
+ getDeclaringClassProtocolNameBytes() {
+ java.lang.Object ref = declaringClassProtocolName_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ declaringClassProtocolName_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // required uint64 clientProtocolVersion = 3;
+ public static final int CLIENTPROTOCOLVERSION_FIELD_NUMBER = 3;
+ private long clientProtocolVersion_;
+ /**
+ * required uint64 clientProtocolVersion = 3;
+ *
+ *
+ ** protocol version of class declaring the called method
+ *
+ */
+ public boolean hasClientProtocolVersion() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * required uint64 clientProtocolVersion = 3;
+ *
+ *
+ ** protocol version of class declaring the called method
+ *
+ */
+ public long getClientProtocolVersion() {
+ return clientProtocolVersion_;
+ }
+
+ private void initFields() {
+ methodName_ = "";
+ declaringClassProtocolName_ = "";
+ clientProtocolVersion_ = 0L;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasMethodName()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasDeclaringClassProtocolName()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasClientProtocolVersion()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getMethodNameBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeBytes(2, getDeclaringClassProtocolNameBytes());
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeUInt64(3, clientProtocolVersion_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getMethodNameBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(2, getDeclaringClassProtocolNameBytes());
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(3, clientProtocolVersion_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto other = (org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto) obj;
+
+ boolean result = true;
+ result = result && (hasMethodName() == other.hasMethodName());
+ if (hasMethodName()) {
+ result = result && getMethodName()
+ .equals(other.getMethodName());
+ }
+ result = result && (hasDeclaringClassProtocolName() == other.hasDeclaringClassProtocolName());
+ if (hasDeclaringClassProtocolName()) {
+ result = result && getDeclaringClassProtocolName()
+ .equals(other.getDeclaringClassProtocolName());
+ }
+ result = result && (hasClientProtocolVersion() == other.hasClientProtocolVersion());
+ if (hasClientProtocolVersion()) {
+ result = result && (getClientProtocolVersion()
+ == other.getClientProtocolVersion());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasMethodName()) {
+ hash = (37 * hash) + METHODNAME_FIELD_NUMBER;
+ hash = (53 * hash) + getMethodName().hashCode();
+ }
+ if (hasDeclaringClassProtocolName()) {
+ hash = (37 * hash) + DECLARINGCLASSPROTOCOLNAME_FIELD_NUMBER;
+ hash = (53 * hash) + getDeclaringClassProtocolName().hashCode();
+ }
+ if (hasClientProtocolVersion()) {
+ hash = (37 * hash) + CLIENTPROTOCOLVERSION_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getClientProtocolVersion());
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code hadoop.common.RequestHeaderProto}
+ *
+ *
+ **
+ * This message is the header for the Protobuf Rpc Engine
+ * when sending a RPC request from RPC client to the RPC server.
+ * The actual request (serialized as protobuf) follows this request.
+ *
+ * No special header is needed for the Rpc Response for Protobuf Rpc Engine.
+ * The normal RPC response header (see RpcHeader.proto) are sufficient.
+ *
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProtoOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.internal_static_hadoop_common_RequestHeaderProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.internal_static_hadoop_common_RequestHeaderProto_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto.class, org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ methodName_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ declaringClassProtocolName_ = "";
+ bitField0_ = (bitField0_ & ~0x00000002);
+ clientProtocolVersion_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.internal_static_hadoop_common_RequestHeaderProto_descriptor;
+ }
+
+ public org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto getDefaultInstanceForType() {
+ return org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto build() {
+ org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto buildPartial() {
+ org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto result = new org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.methodName_ = methodName_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.declaringClassProtocolName_ = declaringClassProtocolName_;
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.clientProtocolVersion_ = clientProtocolVersion_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto) {
+ return mergeFrom((org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto other) {
+ if (other == org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto.getDefaultInstance()) return this;
+ if (other.hasMethodName()) {
+ bitField0_ |= 0x00000001;
+ methodName_ = other.methodName_;
+ onChanged();
+ }
+ if (other.hasDeclaringClassProtocolName()) {
+ bitField0_ |= 0x00000002;
+ declaringClassProtocolName_ = other.declaringClassProtocolName_;
+ onChanged();
+ }
+ if (other.hasClientProtocolVersion()) {
+ setClientProtocolVersion(other.getClientProtocolVersion());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasMethodName()) {
+
+ return false;
+ }
+ if (!hasDeclaringClassProtocolName()) {
+
+ return false;
+ }
+ if (!hasClientProtocolVersion()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required string methodName = 1;
+ private java.lang.Object methodName_ = "";
+ /**
+ * required string methodName = 1;
+ *
+ *
+ ** Name of the RPC method
+ *
+ */
+ public boolean hasMethodName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required string methodName = 1;
+ *
+ *
+ ** Name of the RPC method
+ *
+ */
+ public java.lang.String getMethodName() {
+ java.lang.Object ref = methodName_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ methodName_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * required string methodName = 1;
+ *
+ *
+ ** Name of the RPC method
+ *
+ */
+ public com.google.protobuf.ByteString
+ getMethodNameBytes() {
+ java.lang.Object ref = methodName_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ methodName_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * required string methodName = 1;
+ *
+ *
+ ** Name of the RPC method
+ *
+ */
+ public Builder setMethodName(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ methodName_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required string methodName = 1;
+ *
+ *
+ ** Name of the RPC method
+ *
+ */
+ public Builder clearMethodName() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ methodName_ = getDefaultInstance().getMethodName();
+ onChanged();
+ return this;
+ }
+ /**
+ * required string methodName = 1;
+ *
+ *
+ ** Name of the RPC method
+ *
+ */
+ public Builder setMethodNameBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ methodName_ = value;
+ onChanged();
+ return this;
+ }
+
+ // required string declaringClassProtocolName = 2;
+ private java.lang.Object declaringClassProtocolName_ = "";
+ /**
+ * required string declaringClassProtocolName = 2;
+ *
+ *
+ **
+ * RPCs for a particular interface (ie protocol) are done using a
+ * IPC connection that is setup using rpcProxy.
+ * The rpcProxy's has a declared protocol name that is
+ * sent form client to server at connection time.
+ *
+ * Each Rpc call also sends a protocol name
+ * (called declaringClassprotocolName). This name is usually the same
+ * as the connection protocol name except in some cases.
+ * For example metaProtocols such ProtocolInfoProto which get metainfo
+ * about the protocol reuse the connection but need to indicate that
+ * the actual protocol is different (i.e. the protocol is
+ * ProtocolInfoProto) since they reuse the connection; in this case
+ * the declaringClassProtocolName field is set to the ProtocolInfoProto
+ *
+ */
+ public boolean hasDeclaringClassProtocolName() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required string declaringClassProtocolName = 2;
+ *
+ *
+ **
+ * RPCs for a particular interface (ie protocol) are done using a
+ * IPC connection that is setup using rpcProxy.
+ * The rpcProxy's has a declared protocol name that is
+ * sent form client to server at connection time.
+ *
+ * Each Rpc call also sends a protocol name
+ * (called declaringClassprotocolName). This name is usually the same
+ * as the connection protocol name except in some cases.
+ * For example metaProtocols such ProtocolInfoProto which get metainfo
+ * about the protocol reuse the connection but need to indicate that
+ * the actual protocol is different (i.e. the protocol is
+ * ProtocolInfoProto) since they reuse the connection; in this case
+ * the declaringClassProtocolName field is set to the ProtocolInfoProto
+ *
+ */
+ public java.lang.String getDeclaringClassProtocolName() {
+ java.lang.Object ref = declaringClassProtocolName_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ declaringClassProtocolName_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * required string declaringClassProtocolName = 2;
+ *
+ *
+ **
+ * RPCs for a particular interface (ie protocol) are done using a
+ * IPC connection that is setup using rpcProxy.
+ * The rpcProxy's has a declared protocol name that is
+ * sent form client to server at connection time.
+ *
+ * Each Rpc call also sends a protocol name
+ * (called declaringClassprotocolName). This name is usually the same
+ * as the connection protocol name except in some cases.
+ * For example metaProtocols such ProtocolInfoProto which get metainfo
+ * about the protocol reuse the connection but need to indicate that
+ * the actual protocol is different (i.e. the protocol is
+ * ProtocolInfoProto) since they reuse the connection; in this case
+ * the declaringClassProtocolName field is set to the ProtocolInfoProto
+ *
+ */
+ public com.google.protobuf.ByteString
+ getDeclaringClassProtocolNameBytes() {
+ java.lang.Object ref = declaringClassProtocolName_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ declaringClassProtocolName_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * required string declaringClassProtocolName = 2;
+ *
+ *
+ **
+ * RPCs for a particular interface (ie protocol) are done using a
+ * IPC connection that is setup using rpcProxy.
+ * The rpcProxy's has a declared protocol name that is
+ * sent form client to server at connection time.
+ *
+ * Each Rpc call also sends a protocol name
+ * (called declaringClassprotocolName). This name is usually the same
+ * as the connection protocol name except in some cases.
+ * For example metaProtocols such ProtocolInfoProto which get metainfo
+ * about the protocol reuse the connection but need to indicate that
+ * the actual protocol is different (i.e. the protocol is
+ * ProtocolInfoProto) since they reuse the connection; in this case
+ * the declaringClassProtocolName field is set to the ProtocolInfoProto
+ *
+ */
+ public Builder setDeclaringClassProtocolName(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ declaringClassProtocolName_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required string declaringClassProtocolName = 2;
+ *
+ *
+ **
+ * RPCs for a particular interface (ie protocol) are done using a
+ * IPC connection that is setup using rpcProxy.
+ * The rpcProxy's has a declared protocol name that is
+ * sent form client to server at connection time.
+ *
+ * Each Rpc call also sends a protocol name
+ * (called declaringClassprotocolName). This name is usually the same
+ * as the connection protocol name except in some cases.
+ * For example metaProtocols such ProtocolInfoProto which get metainfo
+ * about the protocol reuse the connection but need to indicate that
+ * the actual protocol is different (i.e. the protocol is
+ * ProtocolInfoProto) since they reuse the connection; in this case
+ * the declaringClassProtocolName field is set to the ProtocolInfoProto
+ *
+ */
+ public Builder clearDeclaringClassProtocolName() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ declaringClassProtocolName_ = getDefaultInstance().getDeclaringClassProtocolName();
+ onChanged();
+ return this;
+ }
+ /**
+ * required string declaringClassProtocolName = 2;
+ *
+ *
+ **
+ * RPCs for a particular interface (ie protocol) are done using a
+ * IPC connection that is setup using rpcProxy.
+ * The rpcProxy's has a declared protocol name that is
+ * sent form client to server at connection time.
+ *
+ * Each Rpc call also sends a protocol name
+ * (called declaringClassprotocolName). This name is usually the same
+ * as the connection protocol name except in some cases.
+ * For example metaProtocols such ProtocolInfoProto which get metainfo
+ * about the protocol reuse the connection but need to indicate that
+ * the actual protocol is different (i.e. the protocol is
+ * ProtocolInfoProto) since they reuse the connection; in this case
+ * the declaringClassProtocolName field is set to the ProtocolInfoProto
+ *
+ */
+ public Builder setDeclaringClassProtocolNameBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ declaringClassProtocolName_ = value;
+ onChanged();
+ return this;
+ }
+
+ // required uint64 clientProtocolVersion = 3;
+ private long clientProtocolVersion_ ;
+ /**
+ * required uint64 clientProtocolVersion = 3;
+ *
+ *
+ ** protocol version of class declaring the called method
+ *
+ */
+ public boolean hasClientProtocolVersion() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * required uint64 clientProtocolVersion = 3;
+ *
+ *
+ ** protocol version of class declaring the called method
+ *
+ */
+ public long getClientProtocolVersion() {
+ return clientProtocolVersion_;
+ }
+ /**
+ * required uint64 clientProtocolVersion = 3;
+ *
+ *
+ ** protocol version of class declaring the called method
+ *
+ */
+ public Builder setClientProtocolVersion(long value) {
+ bitField0_ |= 0x00000004;
+ clientProtocolVersion_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required uint64 clientProtocolVersion = 3;
+ *
+ *
+ ** protocol version of class declaring the called method
+ *
+ */
+ public Builder clearClientProtocolVersion() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ clientProtocolVersion_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:hadoop.common.RequestHeaderProto)
+ }
+
+ static {
+ defaultInstance = new RequestHeaderProto(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:hadoop.common.RequestHeaderProto)
+ }
+
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_hadoop_common_RequestHeaderProto_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_hadoop_common_RequestHeaderProto_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String[] descriptorData = {
+ "\n\027ProtobufRpcEngine.proto\022\rhadoop.common" +
+ "\"k\n\022RequestHeaderProto\022\022\n\nmethodName\030\001 \002" +
+ "(\t\022\"\n\032declaringClassProtocolName\030\002 \002(\t\022\035" +
+ "\n\025clientProtocolVersion\030\003 \002(\004B<\n\036org.apa" +
+ "che.hadoop.ipc.protobufB\027ProtobufRpcEngi" +
+ "neProtos\240\001\001"
+ };
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ internal_static_hadoop_common_RequestHeaderProto_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_hadoop_common_RequestHeaderProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_hadoop_common_RequestHeaderProto_descriptor,
+ new java.lang.String[] { "MethodName", "DeclaringClassProtocolName", "ClientProtocolVersion", });
+ return null;
+ }
+ };
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ }, assigner);
+ }
+
+ // @@protoc_insertion_point(outer_class_scope)
+}
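
A hedged usage sketch of the generated RequestHeaderProto API shown above (not part of the patch); the method and protocol names are illustrative only:

```java
import org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto;

public class RequestHeaderDemo {
  public static void main(String[] args) throws Exception {
    // All three required fields must be set, otherwise build() throws an
    // uninitialized-message exception (see isInitialized() above).
    RequestHeaderProto header = RequestHeaderProto.newBuilder()
        .setMethodName("echo")                                      // illustrative
        .setDeclaringClassProtocolName("org.example.EchoProtocol")  // illustrative
        .setClientProtocolVersion(1L)
        .build();

    // Round-trip through the wire format using the generated parser.
    byte[] wire = header.toByteArray();
    RequestHeaderProto parsed = RequestHeaderProto.parseFrom(wire);
    System.out.println(parsed.getMethodName());  // prints "echo"
  }
}
```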
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index 9751a9b66945c..b93581e8a7b98 100755
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -3869,6 +3869,7 @@ public Map<String, String> getValByRegex(String regex) {
Pattern p = Pattern.compile(regex);
Map<String, String> result = new HashMap<String, String>();
+ List<String> resultKeys = new ArrayList<>();
Matcher m;
for(Map.Entry<Object, Object> item: getProps().entrySet()) {
@@ -3876,11 +3877,12 @@ public Map<String, String> getValByRegex(String regex) {
item.getValue() instanceof String) {
m = p.matcher((String)item.getKey());
if(m.find()) { // match
- result.put((String) item.getKey(),
- substituteVars(getProps().getProperty((String) item.getKey())));
+ resultKeys.add((String) item.getKey());
}
}
}
+ resultKeys.forEach(item ->
+ result.put(item, substituteVars(getProps().getProperty(item))));
return result;
}
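
For context, a hedged reading of the hunk above: the matching keys are collected first, and value substitution runs only after the iteration over getProps() finishes, which avoids touching the backing Properties mid-iteration. An illustrative usage sketch with made-up property names:

```java
import java.util.Map;
import org.apache.hadoop.conf.Configuration;

public class ValByRegexDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.set("dfs.namenode.rpc-address.ns1", "host1:8020");  // made-up keys
    conf.set("dfs.namenode.rpc-address.ns2", "host2:8020");
    conf.set("dfs.replication", "3");

    // Returned values have variable substitution applied, as in the hunk above.
    Map<String, String> addrs =
        conf.getValByRegex("^dfs\\.namenode\\.rpc-address\\..*");
    addrs.forEach((k, v) -> System.out.println(k + " = " + v));
  }
}
```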
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
index 1df68b647c99a..ec346b482a452 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
@@ -23,7 +23,6 @@
import java.lang.reflect.InvocationTargetException;
import java.net.URI;
import java.net.URISyntaxException;
-import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
@@ -1032,7 +1031,7 @@ public String getCanonicalServiceName() {
*/
@InterfaceAudience.LimitedPrivate( { "HDFS", "MapReduce" })
public List<Token<?>> getDelegationTokens(String renewer) throws IOException {
- return new ArrayList<Token<?>>(0);
+ return Collections.emptyList();
}
/**
@@ -1383,4 +1382,34 @@ public boolean hasPathCapability(final Path path,
return false;
}
}
+
+ /**
+ * Create a multipart uploader.
+ * @param basePath file path under which all files are uploaded
+ * @return a MultipartUploaderBuilder object to build the uploader
+ * @throws IOException if some early checks cause IO failures.
+ * @throws UnsupportedOperationException if support is checked early.
+ */
+ @InterfaceStability.Unstable
+ public MultipartUploaderBuilder createMultipartUploader(Path basePath)
+ throws IOException {
+ methodNotSupported();
+ return null;
+ }
+
+ /**
+ * Helper method that throws an {@link UnsupportedOperationException} for the
+ * current {@link FileSystem} method being called.
+ */
+ protected final void methodNotSupported() {
+ // The order of the stacktrace elements is (from top to bottom):
+ // - java.lang.Thread.getStackTrace
+ // - org.apache.hadoop.fs.FileSystem.methodNotSupported
+ // - <the FileSystem method>
+ // therefore, to find out the current method name, we use the element at
+ // index 2.
+ String name = Thread.currentThread().getStackTrace()[2].getMethodName();
+ throw new UnsupportedOperationException(getClass().getCanonicalName() +
+ " does not support method " + name);
+ }
}
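
A standalone sketch of the stack walk used by the methodNotSupported() helper above (class and method names here are hypothetical): element 0 of the trace is Thread.getStackTrace, element 1 is the helper itself, so element 2 names the unsupported method the caller invoked.

```java
public class MethodNotSupportedDemo {
  // Same pattern as the helper above: report the *caller's* method name.
  private void methodNotSupported() {
    String name = Thread.currentThread().getStackTrace()[2].getMethodName();
    throw new UnsupportedOperationException(getClass().getCanonicalName()
        + " does not support method " + name);
  }

  public void createMultipartUploader() {  // stand-in for an unsupported API
    methodNotSupported();  // exception message names "createMultipartUploader"
  }

  public static void main(String[] args) {
    new MethodNotSupportedDemo().createMultipartUploader();
  }
}
```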
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonPathCapabilities.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonPathCapabilities.java
index fb46ef81e36fa..539b3e27c0351 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonPathCapabilities.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonPathCapabilities.java
@@ -131,4 +131,12 @@ private CommonPathCapabilities() {
@InterfaceStability.Unstable
public static final String FS_EXPERIMENTAL_BATCH_LISTING =
"fs.capability.batch.listing";
+
+ /**
+ * Does the store support multipart uploading?
+ * Value: {@value}.
+ */
+ public static final String FS_MULTIPART_UPLOADER =
+ "fs.capability.multipart.uploader";
+
}
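
A hedged sketch of how a client would probe the new capability; the s3a URI is illustrative, and hasPathCapability(Path, String) is the existing probe these constants feed into:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonPathCapabilities;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CapabilityProbeDemo {
  public static void main(String[] args) throws Exception {
    Path base = new Path("s3a://example-bucket/uploads");  // illustrative URI
    FileSystem fs = base.getFileSystem(new Configuration());

    // Multipart support is advertised per path; fall back if absent.
    if (fs.hasPathCapability(base, CommonPathCapabilities.FS_MULTIPART_UPLOADER)) {
      System.out.println("multipart uploads supported under " + base);
    } else {
      System.out.println("falling back to a single-stream upload");
    }
  }
}
```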
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
index ba0064f0813d3..e5f4ef3809f18 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
@@ -66,6 +66,7 @@
import org.apache.hadoop.util.ShutdownHookManager;
import com.google.common.base.Preconditions;
+import com.google.common.annotations.VisibleForTesting;
import org.apache.htrace.core.Tracer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -507,10 +508,9 @@ public static FileContext getLocalFSFileContext(final Configuration aConf)
return getFileContext(FsConstants.LOCAL_FS_URI, aConf);
}
- /* This method is needed for tests. */
+ @VisibleForTesting
@InterfaceAudience.Private
- @InterfaceStability.Unstable /* return type will change to AFS once
- HADOOP-6223 is completed */
+ @InterfaceStability.Unstable
public AbstractFileSystem getDefaultFileSystem() {
return defaultFS;
}
@@ -2957,4 +2957,31 @@ public boolean hasPathCapability(Path path, String capability)
(fs, p) -> fs.hasPathCapability(p, capability));
}
+ /**
+ * Return a set of server default configuration values based on path.
+ * @param path path to fetch server defaults
+ * @return server default configuration values for path
+ * @throws IOException an I/O error occurred
+ */
+ public FsServerDefaults getServerDefaults(final Path path)
+ throws IOException {
+ return FsLinkResolution.resolve(this,
+ fixRelativePart(path),
+ (fs, p) -> fs.getServerDefaults(p));
+ }
+
+ /**
+ * Create a multipart uploader.
+ * @param basePath file path under which all files are uploaded
+ * @return a MultipartUploaderBuilder object to build the uploader
+ * @throws IOException if some early checks cause IO failures.
+ * @throws UnsupportedOperationException if support is checked early.
+ */
+ @InterfaceStability.Unstable
+ public MultipartUploaderBuilder createMultipartUploader(Path basePath)
+ throws IOException {
+ return FsLinkResolution.resolve(this,
+ fixRelativePart(basePath),
+ (fs, p) -> fs.createMultipartUploader(p));
+ }
}
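
A hedged sketch of one of the FileContext additions above; the path is illustrative. The new calls resolve symlinks via FsLinkResolution and then delegate to the underlying AbstractFileSystem:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.Path;

public class FileContextDefaultsDemo {
  public static void main(String[] args) throws Exception {
    FileContext fc = FileContext.getFileContext(new Configuration());
    Path dir = new Path("/user/example");  // illustrative path

    // Server defaults are now resolvable per path rather than only globally.
    FsServerDefaults defaults = fc.getServerDefaults(dir);
    System.out.println("block size: " + defaults.getBlockSize()
        + ", replication: " + defaults.getReplication());
  }
}
```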
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index abb31ed869591..ab5040486dffc 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -132,22 +132,35 @@
* New methods may be marked as Unstable or Evolving for their initial release,
* as a warning that they are new and may change based on the
* experience of use in applications.
+ *
* Important note for developers
- *
- * If you're making changes here to the public API or protected methods,
+ *
+ * If you are making changes here to the public API or protected methods,
* you must review the following subclasses and make sure that
* they are filtering/passing through new methods as appropriate.
+ *
*
- * {@link FilterFileSystem}: methods are passed through.
+ * {@link FilterFileSystem}: methods are passed through. If not,
+ * then {@code TestFilterFileSystem.MustNotImplement} must be
+ * updated with the unsupported interface.
+ * Furthermore, if the new API's support is probed for via
+ * {@link #hasPathCapability(Path, String)} then
+ * {@link FilterFileSystem#hasPathCapability(Path, String)}
+ * must return false, always.
+ *
* {@link ChecksumFileSystem}: checksums are created and
* verified.
+ *
* {@code TestHarFileSystem} will need its {@code MustNotImplement}
* interface updated.
+ *
*
* There are some external places your changes will break things.
* Do co-ordinate changes here.
+ *
*
* HBase: HBoss
+ *
* Hive: HiveShim23
* {@code shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java}
*
@@ -2695,7 +2708,7 @@ static void checkAccessPermissions(FileStatus stat, FsAction mode)
if (perm.getUserAction().implies(mode)) {
return;
}
- } else if (ugi.getGroups().contains(stat.getGroup())) {
+ } else if (ugi.getGroupsSet().contains(stat.getGroup())) {
if (perm.getGroupAction().implies(mode)) {
return;
}
@@ -4644,4 +4657,17 @@ public CompletableFuture<FSDataInputStream> build() throws IOException {
}
+ /**
+ * Create a multipart uploader.
+ * @param basePath file path under which all files are uploaded
+ * @return a MultipartUploaderBuilder object to build the uploader
+ * @throws IOException if some early checks cause IO failures.
+ * @throws UnsupportedOperationException if support is checked early.
+ */
+ @InterfaceStability.Unstable
+ public MultipartUploaderBuilder createMultipartUploader(Path basePath)
+ throws IOException {
+ methodNotSupported();
+ return null;
+ }
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
index cf12ea3898a7f..42410974db17c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
@@ -41,6 +41,8 @@
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.util.Progressable;
+import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs;
+
/****************************************************************
* A FilterFileSystem contains
* some other file system, which it uses as
@@ -728,7 +730,16 @@ protected CompletableFuture<FSDataInputStream> openFileWithOptions(
@Override
public boolean hasPathCapability(final Path path, final String capability)
throws IOException {
- return fs.hasPathCapability(path, capability);
+ switch (validatePathCapabilityArgs(makeQualified(path), capability)) {
+ case CommonPathCapabilities.FS_MULTIPART_UPLOADER:
+ case CommonPathCapabilities.FS_EXPERIMENTAL_BATCH_LISTING:
+ // operations known to be unsupported, irrespective of what
+ // the wrapped class implements.
+ return false;
+ default:
+ // the feature is not implemented.
+ return fs.hasPathCapability(path, capability);
+ }
}
}
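
The switch above pins the two capabilities to false because FilterFileSystem itself does not forward those operations; a wrapper that does forward them would need to re-enable the probe. A hedged sketch, with a hypothetical subclass name:

```java
import java.io.IOException;

import org.apache.hadoop.fs.CommonPathCapabilities;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FilterFileSystem;
import org.apache.hadoop.fs.MultipartUploaderBuilder;
import org.apache.hadoop.fs.Path;

// Hypothetical wrapper that chooses to pass multipart uploads through.
class PassThroughUploadFileSystem extends FilterFileSystem {
  PassThroughUploadFileSystem(FileSystem inner) {
    super(inner);
  }

  @Override
  public MultipartUploaderBuilder createMultipartUploader(Path basePath)
      throws IOException {
    return getRawFileSystem().createMultipartUploader(basePath);
  }

  @Override
  public boolean hasPathCapability(Path path, String capability)
      throws IOException {
    // Re-advertise only the capability this wrapper actually forwards.
    if (CommonPathCapabilities.FS_MULTIPART_UPLOADER.equals(capability)) {
      return getRawFileSystem().hasPathCapability(path, capability);
    }
    return super.hasPathCapability(path, capability);
  }
}
```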
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
index e197506edc88b..27e75d8a25df6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
@@ -448,4 +448,10 @@ public boolean hasPathCapability(final Path path,
throws IOException {
return myFs.hasPathCapability(path, capability);
}
+
+ @Override
+ public MultipartUploaderBuilder createMultipartUploader(final Path basePath)
+ throws IOException {
+ return myFs.createMultipartUploader(basePath);
+ }
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsConstants.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsConstants.java
index 07c16b22358c1..344048f0ceeb1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsConstants.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsConstants.java
@@ -44,4 +44,6 @@ public interface FsConstants {
public static final String VIEWFS_SCHEME = "viewfs";
String FS_VIEWFS_OVERLOAD_SCHEME_TARGET_FS_IMPL_PATTERN =
"fs.viewfs.overload.scheme.target.%s.impl";
+ String VIEWFS_TYPE = "viewfs";
+ String VIEWFSOS_TYPE = "viewfsOverloadScheme";
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShellPermissions.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShellPermissions.java
index 76e379c51f605..1a8a77723176e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShellPermissions.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShellPermissions.java
@@ -112,7 +112,7 @@ protected void processPath(PathData item) throws IOException {
// used by chown/chgrp
static private String allowedChars = Shell.WINDOWS ? "[-_./@a-zA-Z0-9 ]" :
- "[-_./@a-zA-Z0-9]";
+ "[-+_./@a-zA-Z0-9]";
/**
* Used to change owner and/or group of files
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/InternalOperations.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/InternalOperations.java
new file mode 100644
index 0000000000000..2db33eead9288
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/InternalOperations.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+
+/**
+ * This class allows access to package-scoped operations from classes
+ * in org.apache.hadoop.fs.impl and other file system implementations
+ * in the hadoop modules.
+ * This is absolutely not for use by any other application or library.
+ */
+@InterfaceAudience.Private
+public class InternalOperations {
+
+ @SuppressWarnings("deprecation") // rename w/ OVERWRITE
+ public void rename(FileSystem fs, final Path src, final Path dst,
+ final Options.Rename...options) throws IOException {
+ fs.rename(src, dst, options);
+ }
+}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploader.java
index 7ed987eed90dd..89848dc29ded0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploader.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploader.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -15,45 +15,26 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
package org.apache.hadoop.fs;
import java.io.Closeable;
import java.io.IOException;
import java.io.InputStream;
import java.util.Map;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-import static com.google.common.base.Preconditions.checkArgument;
-
/**
* MultipartUploader is an interface for copying files multipart and across
- * multiple nodes. Users should:
- *
- * Initialize an upload.
- * Upload parts in any order.
- * Complete the upload in order to have it materialize in the destination
- * FS.
- *
+ * multiple nodes.
*/
-@InterfaceAudience.Private
+@InterfaceAudience.Public
@InterfaceStability.Unstable
-public abstract class MultipartUploader implements Closeable {
- public static final Logger LOG =
- LoggerFactory.getLogger(MultipartUploader.class);
+public interface MultipartUploader extends Closeable {
- /**
- * Perform any cleanup.
- * The upload is not required to support any operations after this.
- * @throws IOException problems on close.
- */
- @Override
- public void close() throws IOException {
- }
/**
* Initialize a multipart upload.
@@ -61,94 +42,64 @@ public void close() throws IOException {
* @return unique identifier associating part uploads.
* @throws IOException IO failure
*/
- public abstract UploadHandle initialize(Path filePath) throws IOException;
+ CompletableFuture<UploadHandle> startUpload(Path filePath)
+ throws IOException;
/**
* Put part as part of a multipart upload.
* It is possible to have parts uploaded in any order (or in parallel).
- * @param filePath Target path for upload (same as {@link #initialize(Path)}).
+ * @param uploadId Identifier from {@link #startUpload(Path)}.
+ * @param partNumber Index of the part relative to others.
+ * @param filePath Target path for upload (as {@link #startUpload(Path)}).
* @param inputStream Data for this part. Implementations MUST close this
* stream after reading in the data.
- * @param partNumber Index of the part relative to others.
- * @param uploadId Identifier from {@link #initialize(Path)}.
* @param lengthInBytes Target length to read from the stream.
* @return unique PartHandle identifier for the uploaded part.
* @throws IOException IO failure
*/
- public abstract PartHandle putPart(Path filePath, InputStream inputStream,
- int partNumber, UploadHandle uploadId, long lengthInBytes)
+ CompletableFuture<PartHandle> putPart(
+ UploadHandle uploadId,
+ int partNumber,
+ Path filePath,
+ InputStream inputStream,
+ long lengthInBytes)
throws IOException;
/**
* Complete a multipart upload.
- * @param filePath Target path for upload (same as {@link #initialize(Path)}.
+ * @param uploadId Identifier from {@link #startUpload(Path)}.
+ * @param filePath Target path for upload (as {@link #startUpload(Path)}.
* @param handles non-empty map of part number to part handle.
- * from {@link #putPart(Path, InputStream, int, UploadHandle, long)}.
- * @param multipartUploadId Identifier from {@link #initialize(Path)}.
+ * from {@link #putPart(UploadHandle, int, Path, InputStream, long)}.
* @return unique PathHandle identifier for the uploaded file.
* @throws IOException IO failure
*/
- public abstract PathHandle complete(Path filePath,
- Map<Integer, PartHandle> handles,
- UploadHandle multipartUploadId)
+ CompletableFuture<PathHandle> complete(
+ UploadHandle uploadId,
+ Path filePath,
+ Map<Integer, PartHandle> handles)
throws IOException;
/**
* Aborts a multipart upload.
- * @param filePath Target path for upload (same as {@link #initialize(Path)}.
- * @param multipartUploadId Identifier from {@link #initialize(Path)}.
+ * @param uploadId Identifier from {@link #startUpload(Path)}.
+ * @param filePath Target path for upload (same as {@link #startUpload(Path)}.
* @throws IOException IO failure
+ * @return a future; the operation will have completed
*/
- public abstract void abort(Path filePath, UploadHandle multipartUploadId)
+ CompletableFuture<Void> abort(UploadHandle uploadId, Path filePath)
throws IOException;
/**
- * Utility method to validate uploadIDs.
- * @param uploadId Upload ID
- * @throws IllegalArgumentException invalid ID
- */
- protected void checkUploadId(byte[] uploadId)
- throws IllegalArgumentException {
- checkArgument(uploadId != null, "null uploadId");
- checkArgument(uploadId.length > 0,
- "Empty UploadId is not valid");
- }
-
- /**
- * Utility method to validate partHandles.
- * @param partHandles handles
- * @throws IllegalArgumentException if the parts are invalid
+ * Best effort attempt to abort multipart uploads under a path.
+ * Not all implementations support this, and those which do may
+ * be vulnerable to eventually consistent listings of current uploads
+ * -some may be missed.
+ * @param path path to abort uploads under.
+ * @return a future to the number of entries aborted;
+ * -1 if aborting is unsupported
+ * @throws IOException IO failure
*/
- protected void checkPartHandles(Map<Integer, PartHandle> partHandles) {
- checkArgument(!partHandles.isEmpty(),
- "Empty upload");
- partHandles.keySet()
- .stream()
- .forEach(key ->
- checkArgument(key > 0,
- "Invalid part handle index %s", key));
- }
+ CompletableFuture<Integer> abortUploadsUnderPath(Path path) throws IOException;
- /**
- * Check all the arguments to the
- * {@link #putPart(Path, InputStream, int, UploadHandle, long)} operation.
- * @param filePath Target path for upload (same as {@link #initialize(Path)}).
- * @param inputStream Data for this part. Implementations MUST close this
- * stream after reading in the data.
- * @param partNumber Index of the part relative to others.
- * @param uploadId Identifier from {@link #initialize(Path)}.
- * @param lengthInBytes Target length to read from the stream.
- * @throws IllegalArgumentException invalid argument
- */
- protected void checkPutArguments(Path filePath,
- InputStream inputStream,
- int partNumber,
- UploadHandle uploadId,
- long lengthInBytes) throws IllegalArgumentException {
- checkArgument(filePath != null, "null filePath");
- checkArgument(inputStream != null, "null inputStream");
- checkArgument(partNumber > 0, "Invalid part number: %d", partNumber);
- checkArgument(uploadId != null, "null uploadId");
- checkArgument(lengthInBytes >= 0, "Invalid part length: %d", lengthInBytes);
- }
}
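
Putting the reshaped asynchronous interface together, a hedged end-to-end sketch; the destination path, local part file and single-part upload are illustrative, and the futures are simply joined rather than composed:

```java
import java.io.File;
import java.io.InputStream;
import java.nio.file.Files;
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.MultipartUploader;
import org.apache.hadoop.fs.PartHandle;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathHandle;
import org.apache.hadoop.fs.UploadHandle;

public class MultipartUploadDemo {
  public static void main(String[] args) throws Exception {
    Path dest = new Path("s3a://example-bucket/big-file.bin");  // illustrative
    FileSystem fs = dest.getFileSystem(new Configuration());

    try (MultipartUploader uploader = fs.createMultipartUploader(dest).build()) {
      UploadHandle upload = uploader.startUpload(dest).join();

      // One part is uploaded here; real callers loop over many parts, possibly
      // in parallel, since parts may be uploaded in any order.
      Map<Integer, PartHandle> parts = new HashMap<>();
      File local = new File("/tmp/part-0001");  // illustrative local part
      try (InputStream in = Files.newInputStream(local.toPath())) {
        parts.put(1,
            uploader.putPart(upload, 1, dest, in, local.length()).join());
      }

      // complete() materializes the destination file; abort() would discard it.
      PathHandle done = uploader.complete(upload, dest, parts).join();
      System.out.println("upload complete: " + done);
    }
  }
}
```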
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploaderBuilder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploaderBuilder.java
new file mode 100644
index 0000000000000..381bfaa07f6d1
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploaderBuilder.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs;
+
+import javax.annotation.Nonnull;
+import java.io.IOException;
+
+import org.apache.hadoop.fs.permission.FsPermission;
+
+/**
+ * Builder interface for Multipart uploaders.
+ * @param <S> type of multipart uploader built.
+ * @param <B> builder subtype, for fluent method chaining.
+ */
+public interface MultipartUploaderBuilder<S extends MultipartUploader, B extends MultipartUploaderBuilder<S, B>>
+ extends FSBuilder<S, B> {
+
+ /**
+ * Set permission for the file.
+ */
+ B permission(@Nonnull FsPermission perm);
+
+ /**
+ * Set the size of the buffer to be used.
+ */
+ B bufferSize(int bufSize);
+
+ /**
+ * Set replication factor.
+ */
+ B replication(short replica);
+
+ /**
+ * Set block size.
+ */
+ B blockSize(long blkSize);
+
+ /**
+ * Create an FSDataOutputStream at the specified path.
+ */
+ B create();
+
+ /**
+ * Set to true to overwrite the existing file.
+ * Set it to false, an exception will be thrown when calling {@link #build()}
+ * if the file exists.
+ */
+ B overwrite(boolean overwrite);
+
+ /**
+ * Append to an existing file (optional operation).
+ */
+ B append();
+
+ /**
+ * Set checksum opt.
+ */
+ B checksumOpt(@Nonnull Options.ChecksumOpt chksumOpt);
+
+ /**
+ * Create the FSDataOutputStream to write on the file system.
+ *
+ * @throws IllegalArgumentException if the parameters are not valid.
+ * @throws IOException on errors when file system creates or appends the file.
+ */
+ S build() throws IllegalArgumentException, IOException;
+}
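
A hedged sketch of driving this builder; the options chained are the ones declared above and the values are arbitrary:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.MultipartUploader;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class UploaderBuilderDemo {
  public static void main(String[] args) throws Exception {
    Path base = new Path("s3a://example-bucket/uploads");  // illustrative
    FileSystem fs = base.getFileSystem(new Configuration());

    // Each option returns the builder (the B type parameter), so the calls
    // chain fluently until the terminal build().
    MultipartUploader uploader = fs.createMultipartUploader(base)
        .permission(FsPermission.getFileDefault())
        .blockSize(128L * 1024 * 1024)
        .overwrite(true)
        .build();
    uploader.close();
  }
}
```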
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploaderFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploaderFactory.java
deleted file mode 100644
index e35b6bf18bbd6..0000000000000
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploaderFactory.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.Iterator;
-import java.util.ServiceLoader;
-
-/**
- * {@link ServiceLoader}-driven uploader API for storage services supporting
- * multipart uploads.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public abstract class MultipartUploaderFactory {
- public static final Logger LOG =
- LoggerFactory.getLogger(MultipartUploaderFactory.class);
-
- /**
- * Multipart Uploaders listed as services.
- */
- private static ServiceLoader<MultipartUploaderFactory> serviceLoader =
- ServiceLoader.load(MultipartUploaderFactory.class,
- MultipartUploaderFactory.class.getClassLoader());
-
- // Iterate through the serviceLoader to avoid lazy loading.
- // Lazy loading would require synchronization in concurrent use cases.
- static {
- Iterator<MultipartUploaderFactory> iterServices = serviceLoader.iterator();
- while (iterServices.hasNext()) {
- iterServices.next();
- }
- }
-
- /**
- * Get the multipart loader for a specific filesystem.
- * @param fs filesystem
- * @param conf configuration
- * @return an uploader, or null if one was found.
- * @throws IOException failure during the creation process.
- */
- public static MultipartUploader get(FileSystem fs, Configuration conf)
- throws IOException {
- MultipartUploader mpu = null;
- for (MultipartUploaderFactory factory : serviceLoader) {
- mpu = factory.createMultipartUploader(fs, conf);
- if (mpu != null) {
- break;
- }
- }
- return mpu;
- }
-
- protected abstract MultipartUploader createMultipartUploader(FileSystem fs,
- Configuration conf) throws IOException;
-}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
index cf2210575da15..72eeb99a4ea5d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
@@ -64,6 +64,7 @@
public class RawLocalFileSystem extends FileSystem {
static final URI NAME = URI.create("file:///");
private Path workingDir;
+ private long defaultBlockSize;
// Temporary workaround for HADOOP-9652.
private static boolean useDeprecatedFileStatus = true;
@@ -100,6 +101,7 @@ public File pathToFile(Path path) {
public void initialize(URI uri, Configuration conf) throws IOException {
super.initialize(uri, conf);
setConf(conf);
+ defaultBlockSize = getDefaultBlockSize(new Path(uri));
}
/*******************************************************
@@ -518,7 +520,12 @@ public FileStatus[] listStatus(Path f) throws IOException {
}
return new FileStatus[] {
new DeprecatedRawLocalFileStatus(localf,
- getDefaultBlockSize(f), this) };
+ defaultBlockSize, this) };
+ }
+
+ @Override
+ public boolean exists(Path f) throws IOException {
+ return pathToFile(f).exists();
}
protected boolean mkOneDir(File p2f) throws IOException {
@@ -663,7 +670,7 @@ private FileStatus deprecatedGetFileStatus(Path f) throws IOException {
File path = pathToFile(f);
if (path.exists()) {
return new DeprecatedRawLocalFileStatus(pathToFile(f),
- getDefaultBlockSize(f), this);
+ defaultBlockSize, this);
} else {
throw new FileNotFoundException("File " + f + " does not exist");
}
@@ -1051,7 +1058,7 @@ private FileStatus deprecatedGetFileLinkStatusInternal(final Path f)
private FileStatus getNativeFileLinkStatus(final Path f,
boolean dereference) throws IOException {
checkPath(f);
- Stat stat = new Stat(f, getDefaultBlockSize(f), dereference, this);
+ Stat stat = new Stat(f, defaultBlockSize, dereference, this);
FileStatus status = stat.getFileStatus();
return status;
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Stat.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Stat.java
index 5e80a140175e6..f6c2f2af1c9ba 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Stat.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Stat.java
@@ -20,8 +20,7 @@
import java.io.BufferedReader;
import java.io.FileNotFoundException;
import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
+import java.util.Collections;
import java.util.NoSuchElementException;
import java.util.StringTokenizer;
@@ -65,9 +64,7 @@ public Stat(Path path, long blockSize, boolean deref, FileSystem fs)
this.blockSize = blockSize;
this.dereference = deref;
// LANG = C setting
- Map<String, String> env = new HashMap<String, String>();
- env.put("LANG", "C");
- setEnvironment(env);
+ setEnvironment(Collections.singletonMap("LANG", "C"));
}
public FileStatus getFileStatus() throws IOException {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/AbstractMultipartUploader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/AbstractMultipartUploader.java
new file mode 100644
index 0000000000000..d8b7fe0744087
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/AbstractMultipartUploader.java
@@ -0,0 +1,142 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.impl;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Map;
+import java.util.Objects;
+import java.util.concurrent.CompletableFuture;
+
+import com.google.common.base.Preconditions;
+
+import org.apache.hadoop.fs.MultipartUploader;
+import org.apache.hadoop.fs.PartHandle;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.UploadHandle;
+
+import static com.google.common.base.Preconditions.checkArgument;
+
+/**
+ * Standard base class for Multipart Uploaders.
+ */
+public abstract class AbstractMultipartUploader implements MultipartUploader {
+
+ /**
+ * Base path of upload.
+ */
+ private final Path basePath;
+
+ /**
+ * Instantiate.
+ * @param basePath base path
+ */
+ protected AbstractMultipartUploader(final Path basePath) {
+ this.basePath = Objects.requireNonNull(basePath, "null path");
+ }
+
+ /**
+ * Perform any cleanup.
+ * The upload is not required to support any operations after this.
+ * @throws IOException problems on close.
+ */
+ @Override
+ public void close() throws IOException {
+ }
+
+ protected Path getBasePath() {
+ return basePath;
+ }
+
+ /**
+ * Validate a path.
+ * @param path path to check.
+ */
+ protected void checkPath(Path path) {
+ Objects.requireNonNull(path, "null path");
+ Preconditions.checkArgument(path.toString().startsWith(basePath.toString()),
+ "Path %s is not under %s", path, basePath);
+ }
+
+ /**
+ * Utility method to validate uploadIDs.
+ * @param uploadId Upload ID
+ * @throws IllegalArgumentException invalid ID
+ */
+ protected void checkUploadId(byte[] uploadId)
+ throws IllegalArgumentException {
+ checkArgument(uploadId != null, "null uploadId");
+ checkArgument(uploadId.length > 0,
+ "Empty UploadId is not valid");
+ }
+
+ /**
+ * Utility method to validate partHandles.
+ * @param partHandles handles
+ * @throws IllegalArgumentException if the parts are invalid
+ */
+ protected void checkPartHandles(Map<Integer, PartHandle> partHandles) {
+ checkArgument(!partHandles.isEmpty(),
+ "Empty upload");
+ partHandles.keySet()
+ .stream()
+ .forEach(key ->
+ checkArgument(key > 0,
+ "Invalid part handle index %s", key));
+ }
+
+ /**
+ * Check all the arguments to the
+ * {@link MultipartUploader#putPart(UploadHandle, int, Path, InputStream, long)}
+ * operation.
+ * @param filePath Target path for upload (as {@link #startUpload(Path)}).
+ * @param inputStream Data for this part. Implementations MUST close this
+ * stream after reading in the data.
+ * @param partNumber Index of the part relative to others.
+ * @param uploadId Identifier from {@link #startUpload(Path)}.
+ * @param lengthInBytes Target length to read from the stream.
+ * @throws IllegalArgumentException invalid argument
+ */
+ protected void checkPutArguments(Path filePath,
+ InputStream inputStream,
+ int partNumber,
+ UploadHandle uploadId,
+ long lengthInBytes) throws IllegalArgumentException {
+ checkPath(filePath);
+ checkArgument(inputStream != null, "null inputStream");
+ checkArgument(partNumber > 0, "Invalid part number: %d", partNumber);
+ checkArgument(uploadId != null, "null uploadId");
+ checkArgument(lengthInBytes >= 0, "Invalid part length: %d", lengthInBytes);
+ }
+
+ /**
+ * {@inheritDoc}.
+ * @param path path to abort uploads under.
+ * @return a future to -1.
+ * @throws IOException
+ */
+ public CompletableFuture<Integer> abortUploadsUnderPath(Path path)
+ throws IOException {
+ checkPath(path);
+ CompletableFuture<Integer> f = new CompletableFuture<>();
+ f.complete(-1);
+ return f;
+ }
+
+}
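
A hedged sketch of how a store-specific implementation might lean on the validation helpers above; every method is a stub except for the base-class checks, and the class name and behaviour are illustrative rather than part of this patch.

import java.io.IOException;
import java.io.InputStream;
import java.util.Map;
import java.util.concurrent.CompletableFuture;

import org.apache.hadoop.fs.PartHandle;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathHandle;
import org.apache.hadoop.fs.UploadHandle;
import org.apache.hadoop.fs.impl.AbstractMultipartUploader;

class SketchUploader extends AbstractMultipartUploader {

  SketchUploader(Path basePath) {
    super(basePath);
  }

  @Override
  public CompletableFuture<UploadHandle> startUpload(Path filePath)
      throws IOException {
    checkPath(filePath);                      // base-class path validation
    throw new UnsupportedOperationException("sketch only");
  }

  @Override
  public CompletableFuture<PartHandle> putPart(UploadHandle uploadId,
      int partNumber, Path filePath, InputStream inputStream,
      long lengthInBytes) throws IOException {
    // All argument checks in one call, as the base class intends.
    checkPutArguments(filePath, inputStream, partNumber, uploadId,
        lengthInBytes);
    throw new UnsupportedOperationException("sketch only");
  }

  @Override
  public CompletableFuture<PathHandle> complete(UploadHandle uploadId,
      Path filePath, Map<Integer, PartHandle> handleMap) throws IOException {
    checkUploadId(uploadId.toByteArray());    // non-empty upload id
    checkPartHandles(handleMap);              // at least one valid part
    throw new UnsupportedOperationException("sketch only");
  }

  @Override
  public CompletableFuture<Void> abort(UploadHandle uploadId, Path filePath)
      throws IOException {
    checkPath(filePath);
    throw new UnsupportedOperationException("sketch only");
  }
}
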
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FileSystemMultipartUploader.java
similarity index 52%
rename from hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java
rename to hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FileSystemMultipartUploader.java
index b77c244220a9e..ae0def0e378d4 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FileSystemMultipartUploader.java
@@ -14,24 +14,42 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.fs;
+
+package org.apache.hadoop.fs.impl;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Comparator;
+import java.util.HashSet;
import java.util.List;
import java.util.Map;
+import java.util.Set;
import java.util.UUID;
+import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;
import com.google.common.base.Charsets;
+import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.apache.commons.compress.utils.IOUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BBPartHandle;
+import org.apache.hadoop.fs.BBUploadHandle;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FSDataOutputStreamBuilder;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.InternalOperations;
+import org.apache.hadoop.fs.Options;
+import org.apache.hadoop.fs.PartHandle;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathHandle;
+import org.apache.hadoop.fs.UploadHandle;
import org.apache.hadoop.fs.permission.FsPermission;
import static org.apache.hadoop.fs.Path.mergePaths;
@@ -50,40 +68,82 @@
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
-public class FileSystemMultipartUploader extends MultipartUploader {
+public class FileSystemMultipartUploader extends AbstractMultipartUploader {
+
+ private static final Logger LOG = LoggerFactory.getLogger(
+ FileSystemMultipartUploader.class);
private final FileSystem fs;
- public FileSystemMultipartUploader(FileSystem fs) {
+ private final FileSystemMultipartUploaderBuilder builder;
+
+ private final FsPermission permission;
+
+ private final long blockSize;
+
+ private final Options.ChecksumOpt checksumOpt;
+
+ public FileSystemMultipartUploader(
+ final FileSystemMultipartUploaderBuilder builder,
+ FileSystem fs) {
+ super(builder.getPath());
+ this.builder = builder;
this.fs = fs;
+ blockSize = builder.getBlockSize();
+ checksumOpt = builder.getChecksumOpt();
+ permission = builder.getPermission();
}
@Override
- public UploadHandle initialize(Path filePath) throws IOException {
- Path collectorPath = createCollectorPath(filePath);
- fs.mkdirs(collectorPath, FsPermission.getDirDefault());
+ public CompletableFuture<UploadHandle> startUpload(Path filePath)
+ throws IOException {
+ checkPath(filePath);
+ return FutureIOSupport.eval(() -> {
+ Path collectorPath = createCollectorPath(filePath);
+ fs.mkdirs(collectorPath, FsPermission.getDirDefault());
- ByteBuffer byteBuffer = ByteBuffer.wrap(
- collectorPath.toString().getBytes(Charsets.UTF_8));
- return BBUploadHandle.from(byteBuffer);
+ ByteBuffer byteBuffer = ByteBuffer.wrap(
+ collectorPath.toString().getBytes(Charsets.UTF_8));
+ return BBUploadHandle.from(byteBuffer);
+ });
}
@Override
- public PartHandle putPart(Path filePath, InputStream inputStream,
- int partNumber, UploadHandle uploadId, long lengthInBytes)
+ public CompletableFuture<PartHandle> putPart(UploadHandle uploadId,
+ int partNumber, Path filePath,
+ InputStream inputStream,
+ long lengthInBytes)
throws IOException {
checkPutArguments(filePath, inputStream, partNumber, uploadId,
lengthInBytes);
+ return FutureIOSupport.eval(() -> innerPutPart(filePath,
+ inputStream, partNumber, uploadId, lengthInBytes));
+ }
+
+ private PartHandle innerPutPart(Path filePath,
+ InputStream inputStream,
+ int partNumber,
+ UploadHandle uploadId,
+ long lengthInBytes)
+ throws IOException {
byte[] uploadIdByteArray = uploadId.toByteArray();
checkUploadId(uploadIdByteArray);
Path collectorPath = new Path(new String(uploadIdByteArray, 0,
uploadIdByteArray.length, Charsets.UTF_8));
Path partPath =
mergePaths(collectorPath, mergePaths(new Path(Path.SEPARATOR),
- new Path(Integer.toString(partNumber) + ".part")));
- try(FSDataOutputStream fsDataOutputStream =
- fs.createFile(partPath).build()) {
- IOUtils.copy(inputStream, fsDataOutputStream, 4096);
+ new Path(partNumber + ".part")));
+ final FSDataOutputStreamBuilder fileBuilder = fs.createFile(partPath);
+ if (checksumOpt != null) {
+ fileBuilder.checksumOpt(checksumOpt);
+ }
+ if (permission != null) {
+ fileBuilder.permission(permission);
+ }
+ try (FSDataOutputStream fsDataOutputStream =
+ fileBuilder.blockSize(blockSize).build()) {
+ IOUtils.copy(inputStream, fsDataOutputStream,
+ this.builder.getBufferSize());
} finally {
cleanupWithLogger(LOG, inputStream);
}
@@ -106,16 +166,36 @@ private PathHandle getPathHandle(Path filePath) throws IOException {
private long totalPartsLen(List<Path> partHandles) throws IOException {
long totalLen = 0;
- for (Path p: partHandles) {
+ for (Path p : partHandles) {
totalLen += fs.getFileStatus(p).getLen();
}
return totalLen;
}
@Override
- @SuppressWarnings("deprecation") // rename w/ OVERWRITE
- public PathHandle complete(Path filePath, Map<Integer, PartHandle> handleMap,
- UploadHandle multipartUploadId) throws IOException {
+ public CompletableFuture<PathHandle> complete(
+ UploadHandle uploadId,
+ Path filePath,
+ Map<Integer, PartHandle> handleMap) throws IOException {
+
+ checkPath(filePath);
+ return FutureIOSupport.eval(() ->
+ innerComplete(uploadId, filePath, handleMap));
+ }
+
+ /**
+ * The upload complete operation.
+ * @param multipartUploadId the ID of the upload
+ * @param filePath path
+ * @param handleMap map of handles
+ * @return the path handle
+ * @throws IOException failure
+ */
+ private PathHandle innerComplete(
+ UploadHandle multipartUploadId, Path filePath,
+ Map<Integer, PartHandle> handleMap) throws IOException {
+
+ checkPath(filePath);
checkUploadId(multipartUploadId.toByteArray());
@@ -133,6 +213,13 @@ public PathHandle complete(Path filePath, Map handleMap,
})
.collect(Collectors.toList());
+ int count = partHandles.size();
+ // built up to identify duplicates - if the size of this set is
+ // below the number of parts, then there is a duplicate entry.
+ Set<Path> values = new HashSet<>(count);
+ values.addAll(partHandles);
+ Preconditions.checkArgument(values.size() == count,
+ "Duplicate PartHandles");
byte[] uploadIdByteArray = multipartUploadId.toByteArray();
Path collectorPath = new Path(new String(uploadIdByteArray, 0,
uploadIdByteArray.length, Charsets.UTF_8));
@@ -146,35 +233,30 @@ public PathHandle complete(Path filePath, Map handleMap,
fs.create(filePathInsideCollector).close();
fs.concat(filePathInsideCollector,
partHandles.toArray(new Path[handles.size()]));
- fs.rename(filePathInsideCollector, filePath, Options.Rename.OVERWRITE);
+ new InternalOperations()
+ .rename(fs, filePathInsideCollector, filePath,
+ Options.Rename.OVERWRITE);
}
fs.delete(collectorPath, true);
return getPathHandle(filePath);
}
@Override
- public void abort(Path filePath, UploadHandle uploadId) throws IOException {
+ public CompletableFuture<Void> abort(UploadHandle uploadId,
+ Path filePath)
+ throws IOException {
+ checkPath(filePath);
byte[] uploadIdByteArray = uploadId.toByteArray();
checkUploadId(uploadIdByteArray);
Path collectorPath = new Path(new String(uploadIdByteArray, 0,
uploadIdByteArray.length, Charsets.UTF_8));
- // force a check for a file existing; raises FNFE if not found
- fs.getFileStatus(collectorPath);
- fs.delete(collectorPath, true);
- }
-
- /**
- * Factory for creating MultipartUploaderFactory objects for file://
- * filesystems.
- */
- public static class Factory extends MultipartUploaderFactory {
- protected MultipartUploader createMultipartUploader(FileSystem fs,
- Configuration conf) {
- if (fs.getScheme().equals("file")) {
- return new FileSystemMultipartUploader(fs);
- }
+ return FutureIOSupport.eval(() -> {
+ // force a check for a file existing; raises FNFE if not found
+ fs.getFileStatus(collectorPath);
+ fs.delete(collectorPath, true);
return null;
- }
+ });
}
+
}
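
To make the new asynchronous surface concrete, a minimal usage sketch that starts an upload, pushes one part, and completes it. The helper class is hypothetical; it assumes the target FileSystem supports the operations the uploader relies on (such as concat) and that MultipartUploader is closeable, as the close() override above suggests.

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.ExecutionException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.MultipartUploader;
import org.apache.hadoop.fs.PartHandle;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathHandle;
import org.apache.hadoop.fs.UploadHandle;
import org.apache.hadoop.fs.impl.FileSystemMultipartUploaderBuilder;

public class MultipartUploadSketch {
  public static PathHandle uploadOnePart(FileSystem fs, Path dest, byte[] data)
      throws IOException, InterruptedException, ExecutionException {
    // Qualify the path so it sits under the uploader's qualified base path.
    Path target = fs.makeQualified(dest);
    try (MultipartUploader uploader =
             new FileSystemMultipartUploaderBuilder(fs, target).build()) {
      UploadHandle upload = uploader.startUpload(target).get();
      PartHandle part = uploader.putPart(upload, 1, target,
          new ByteArrayInputStream(data), data.length).get();
      Map<Integer, PartHandle> parts = Collections.singletonMap(1, part);
      return uploader.complete(upload, target, parts).get();
    }
  }
}
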
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FileSystemMultipartUploaderBuilder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FileSystemMultipartUploaderBuilder.java
new file mode 100644
index 0000000000000..7c4d995c69d1b
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FileSystemMultipartUploaderBuilder.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.impl;
+
+import javax.annotation.Nonnull;
+import java.io.IOException;
+import java.util.EnumSet;
+
+import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Options;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+
+/**
+ * Builder for {@link FileSystemMultipartUploader}.
+ */
+public class FileSystemMultipartUploaderBuilder extends
+    MultipartUploaderBuilderImpl<FileSystemMultipartUploader,
+        FileSystemMultipartUploaderBuilder> {
+
+ public FileSystemMultipartUploaderBuilder(
+ @Nonnull final FileSystem fileSystem,
+ @Nonnull final Path path) {
+ super(fileSystem, path);
+ }
+
+ @Override
+ public FileSystemMultipartUploaderBuilder getThisBuilder() {
+ return this;
+ }
+
+ @Override
+ public FileSystemMultipartUploader build()
+ throws IllegalArgumentException, IOException {
+ return new FileSystemMultipartUploader(this, getFS());
+ }
+
+ @Override
+ public FileSystem getFS() {
+ return super.getFS();
+ }
+
+ @Override
+ public FsPermission getPermission() {
+ return super.getPermission();
+ }
+
+ @Override
+ public int getBufferSize() {
+ return super.getBufferSize();
+ }
+
+ @Override
+ public short getReplication() {
+ return super.getReplication();
+ }
+
+ @Override
+ public EnumSet<CreateFlag> getFlags() {
+ return super.getFlags();
+ }
+
+ @Override
+ public Options.ChecksumOpt getChecksumOpt() {
+ return super.getChecksumOpt();
+ }
+
+ @Override
+ protected long getBlockSize() {
+ return super.getBlockSize();
+ }
+
+
+}
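
A small, illustrative sketch of configuring the builder before build(); the chosen buffer size and permission are arbitrary, and the uploader reads these values back through the protected getters exposed above when it writes each ".part" file.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.MultipartUploader;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.impl.FileSystemMultipartUploaderBuilder;
import org.apache.hadoop.fs.permission.FsPermission;

public class UploaderBuilderSketch {
  public static MultipartUploader newUploader(Path base) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    // Fluent configuration, then build() hands the builder to the uploader.
    return new FileSystemMultipartUploaderBuilder(fs, base)
        .permission(FsPermission.getFileDefault())
        .bufferSize(64 * 1024)
        .build();
  }
}
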
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FutureIOSupport.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FutureIOSupport.java
index 26856e5b935e0..f13d701803d7e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FutureIOSupport.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FutureIOSupport.java
@@ -21,6 +21,7 @@
import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.Map;
+import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
@@ -52,7 +53,7 @@ private FutureIOSupport() {
* @throws IOException if something went wrong
* @throws RuntimeException any nested RTE thrown
*/
- public static T awaitFuture(final Future future)
+ public static T awaitFuture(final Future future)
throws InterruptedIOException, IOException, RuntimeException {
try {
return future.get();
@@ -224,4 +225,29 @@ public static void propagateOptions(
}
}
}
+
+ /**
+ * Evaluate a CallableRaisingIOE in the current thread,
+ * converting IOEs to RTEs and propagating.
+ * @param callable callable to invoke
+ * @param <T> Return type.
+ * @return the evaluated result.
+ * @throws UnsupportedOperationException fail fast if unsupported
+ * @throws IllegalArgumentException invalid argument
+ */
+ public static <T> CompletableFuture<T> eval(
+ FunctionsRaisingIOE.CallableRaisingIOE<T> callable) {
+ CompletableFuture<T> result = new CompletableFuture<>();
+ try {
+ result.complete(callable.apply());
+ } catch (UnsupportedOperationException | IllegalArgumentException tx) {
+ // fail fast here
+ throw tx;
+ } catch (Throwable tx) {
+ // fail lazily here to ensure callers expect all File IO operations to
+ // surface later
+ result.completeExceptionally(tx);
+ }
+ return result;
+ }
}
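
A hedged sketch pairing eval() with the existing awaitFuture() helper; the getFileStatus call is only an example, and it assumes CallableRaisingIOE is a functional interface that accepts a lambda of this shape.

import java.io.IOException;
import java.util.concurrent.CompletableFuture;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.impl.FutureIOSupport;

public class EvalSketch {
  public static FileStatus statusOf(Path p) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    // eval() runs the callable inline; an IOException surfaces later,
    // when the future is awaited, rather than at submission time.
    CompletableFuture<FileStatus> future =
        FutureIOSupport.eval(() -> fs.getFileStatus(p));
    return FutureIOSupport.awaitFuture(future);
  }
}
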
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/MultipartUploaderBuilderImpl.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/MultipartUploaderBuilderImpl.java
new file mode 100644
index 0000000000000..6c3336e6882b3
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/MultipartUploaderBuilderImpl.java
@@ -0,0 +1,215 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.impl;
+
+import javax.annotation.Nonnull;
+import java.io.IOException;
+import java.util.EnumSet;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsServerDefaults;
+import org.apache.hadoop.fs.MultipartUploader;
+import org.apache.hadoop.fs.MultipartUploaderBuilder;
+import org.apache.hadoop.fs.Options.ChecksumOpt;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
+
+/**
+ * Builder for {@link MultipartUploader} implementations.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public abstract class MultipartUploaderBuilderImpl
+    <S extends MultipartUploader, B extends MultipartUploaderBuilderImpl<S, B>>
+    extends AbstractFSBuilderImpl<S, B>
+    implements MultipartUploaderBuilder<S, B> {
+
+ private final FileSystem fs;
+
+ private FsPermission permission;
+
+ private int bufferSize;
+
+ private short replication;
+
+ private long blockSize;
+
+ private final EnumSet<CreateFlag> flags = EnumSet.noneOf(CreateFlag.class);
+
+ private ChecksumOpt checksumOpt;
+
+ /**
+ * Return the concrete implementation of the builder instance.
+ */
+ public abstract B getThisBuilder();
+
+ /**
+ * Construct from a {@link FileContext}.
+ *
+ * @param fc FileContext
+ * @param p path.
+ * @throws IOException failure
+ */
+ protected MultipartUploaderBuilderImpl(@Nonnull FileContext fc,
+ @Nonnull Path p) throws IOException {
+ super(checkNotNull(p));
+ checkNotNull(fc);
+ this.fs = null;
+
+ FsServerDefaults defaults = fc.getServerDefaults(p);
+ bufferSize = defaults.getFileBufferSize();
+ replication = defaults.getReplication();
+ blockSize = defaults.getBlockSize();
+ }
+
+ /**
+ * Constructor.
+ */
+ protected MultipartUploaderBuilderImpl(@Nonnull FileSystem fileSystem,
+ @Nonnull Path p) {
+ super(fileSystem.makeQualified(checkNotNull(p)));
+ checkNotNull(fileSystem);
+ fs = fileSystem;
+ bufferSize = fs.getConf().getInt(IO_FILE_BUFFER_SIZE_KEY,
+ IO_FILE_BUFFER_SIZE_DEFAULT);
+ replication = fs.getDefaultReplication(p);
+ blockSize = fs.getDefaultBlockSize(p);
+ }
+
+ protected FileSystem getFS() {
+ checkNotNull(fs);
+ return fs;
+ }
+
+ protected FsPermission getPermission() {
+ if (permission == null) {
+ permission = FsPermission.getFileDefault();
+ }
+ return permission;
+ }
+
+ /**
+ * Set permission for the file.
+ */
+ @Override
+ public B permission(@Nonnull final FsPermission perm) {
+ checkNotNull(perm);
+ permission = perm;
+ return getThisBuilder();
+ }
+
+ protected int getBufferSize() {
+ return bufferSize;
+ }
+
+ /**
+ * Set the size of the buffer to be used.
+ */
+ @Override
+ public B bufferSize(int bufSize) {
+ bufferSize = bufSize;
+ return getThisBuilder();
+ }
+
+ protected short getReplication() {
+ return replication;
+ }
+
+ /**
+ * Set replication factor.
+ */
+ @Override
+ public B replication(short replica) {
+ replication = replica;
+ return getThisBuilder();
+ }
+
+ protected long getBlockSize() {
+ return blockSize;
+ }
+
+ /**
+ * Set block size.
+ */
+ @Override
+ public B blockSize(long blkSize) {
+ blockSize = blkSize;
+ return getThisBuilder();
+ }
+
+ protected EnumSet<CreateFlag> getFlags() {
+ return flags;
+ }
+
+ /**
+ * Create an FSDataOutputStream at the specified path.
+ */
+ @Override
+ public B create() {
+ flags.add(CreateFlag.CREATE);
+ return getThisBuilder();
+ }
+
+ /**
+ * Set to true to overwrite the existing file.
+ * If set to false, an exception will be thrown when calling {@link #build()}
+ * if the file already exists.
+ */
+ @Override
+ public B overwrite(boolean overwrite) {
+ if (overwrite) {
+ flags.add(CreateFlag.OVERWRITE);
+ } else {
+ flags.remove(CreateFlag.OVERWRITE);
+ }
+ return getThisBuilder();
+ }
+
+ /**
+ * Append to an existing file (optional operation).
+ */
+ @Override
+ public B append() {
+ flags.add(CreateFlag.APPEND);
+ return getThisBuilder();
+ }
+
+ protected ChecksumOpt getChecksumOpt() {
+ return checksumOpt;
+ }
+
+ /**
+ * Set checksum opt.
+ */
+ @Override
+ public B checksumOpt(@Nonnull final ChecksumOpt chksumOpt) {
+ checkNotNull(chksumOpt);
+ checksumOpt = chksumOpt;
+ return getThisBuilder();
+ }
+
+}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java
index 3c9368ca2ed9b..184b674adcc27 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java
@@ -20,6 +20,7 @@
import java.io.FileNotFoundException;
import java.io.IOException;
+import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
@@ -97,7 +98,7 @@ protected List expandArgument(String arg) throws IOException {
throw e;
}
// prevent -f on a non-existent glob from failing
- return new LinkedList<PathData>();
+ return Collections.emptyList();
}
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsUsage.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsUsage.java
index 6596527738058..64aade3df9539 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsUsage.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsUsage.java
@@ -128,7 +128,8 @@ private void addToUsagesTable(URI uri, FsStatus fsStatus,
@Override
protected void processPath(PathData item) throws IOException {
- if (ViewFileSystemUtil.isViewFileSystem(item.fs)) {
+ if (ViewFileSystemUtil.isViewFileSystem(item.fs)
+ || ViewFileSystemUtil.isViewFileSystemOverloadScheme(item.fs)) {
ViewFileSystem viewFileSystem = (ViewFileSystem) item.fs;
Map<MountPoint, FsStatus> fsStatusMap =
ViewFileSystemUtil.getStatus(viewFileSystem, item.path);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java
index 6dd1f6589478e..7d29b8f44ca62 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java
@@ -66,8 +66,7 @@ public static void addLink(Configuration conf, final String mountTableName,
*/
public static void addLink(final Configuration conf, final String src,
final URI target) {
- addLink( conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE,
- src, target);
+ addLink(conf, getDefaultMountTableName(conf), src, target);
}
/**
@@ -88,8 +87,7 @@ public static void addLinkMergeSlash(Configuration conf,
* @param target
*/
public static void addLinkMergeSlash(Configuration conf, final URI target) {
- addLinkMergeSlash(conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE,
- target);
+ addLinkMergeSlash(conf, getDefaultMountTableName(conf), target);
}
/**
@@ -110,8 +108,7 @@ public static void addLinkFallback(Configuration conf,
* @param target
*/
public static void addLinkFallback(Configuration conf, final URI target) {
- addLinkFallback(conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE,
- target);
+ addLinkFallback(conf, getDefaultMountTableName(conf), target);
}
/**
@@ -132,7 +129,7 @@ public static void addLinkMerge(Configuration conf,
* @param targets
*/
public static void addLinkMerge(Configuration conf, final URI[] targets) {
- addLinkMerge(conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, targets);
+ addLinkMerge(conf, getDefaultMountTableName(conf), targets);
}
/**
@@ -166,8 +163,7 @@ public static void addLinkNfly(Configuration conf, String mountTableName,
public static void addLinkNfly(final Configuration conf, final String src,
final URI ... targets) {
- addLinkNfly(conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, src, null,
- targets);
+ addLinkNfly(conf, getDefaultMountTableName(conf), src, null, targets);
}
/**
@@ -177,8 +173,7 @@ public static void addLinkNfly(final Configuration conf, final String src,
*/
public static void setHomeDirConf(final Configuration conf,
final String homedir) {
- setHomeDirConf( conf,
- Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, homedir);
+ setHomeDirConf(conf, getDefaultMountTableName(conf), homedir);
}
/**
@@ -202,7 +197,7 @@ public static void setHomeDirConf(final Configuration conf,
* @return home dir value, null if variable is not in conf
*/
public static String getHomeDirValue(final Configuration conf) {
- return getHomeDirValue(conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE);
+ return getHomeDirValue(conf, getDefaultMountTableName(conf));
}
/**
@@ -216,4 +211,18 @@ public static String getHomeDirValue(final Configuration conf,
return conf.get(getConfigViewFsPrefix(mountTableName) + "." +
Constants.CONFIG_VIEWFS_HOMEDIR);
}
+
+ /**
+ * Get the name of the default mount table to use. If
+ * {@link Constants#CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE_NAME_KEY} is specified,
+ * its value is returned. Otherwise,
+ * {@link Constants#CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE} is returned.
+ *
+ * @param conf Configuration to use.
+ * @return the name of the default mount table to use.
+ */
+ public static String getDefaultMountTableName(final Configuration conf) {
+ return conf.get(Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE_NAME_KEY,
+ Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE);
+ }
}
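
A short, illustrative configuration sketch for the new helper; the table name and link target are made up, and links added through ConfigUtil without an explicit table name now land under the configured default table instead of "default".

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.viewfs.ConfigUtil;
import org.apache.hadoop.fs.viewfs.Constants;

public class DefaultMountTableSketch {
  public static Configuration mountTable() {
    Configuration conf = new Configuration();
    // Name the default mount table "cluster1"; getDefaultMountTableName(conf)
    // will now return "cluster1" rather than "default".
    conf.set(Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE_NAME_KEY, "cluster1");
    ConfigUtil.addLink(conf, "/user", URI.create("hdfs://nn1/user"));
    return conf;
  }
}
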
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
index 0a5d4b46ce2d8..492cb87ee024e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
@@ -41,12 +41,18 @@ public interface Constants {
* then the hadoop default value (/user) is used.
*/
public static final String CONFIG_VIEWFS_HOMEDIR = "homedir";
-
+
+ /**
+ * Config key to specify the name of the default mount table.
+ */
+ String CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE_NAME_KEY =
+ "fs.viewfs.mounttable.default.name.key";
+
/**
* Config variable name for the default mount table.
*/
public static final String CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE = "default";
-
+
/**
* Config variable full prefix for the default mount table.
*/
@@ -90,4 +96,25 @@ public interface Constants {
String CONFIG_VIEWFS_ENABLE_INNER_CACHE = "fs.viewfs.enable.inner.cache";
boolean CONFIG_VIEWFS_ENABLE_INNER_CACHE_DEFAULT = true;
+
+ /**
+ * Enable ViewFileSystem to show mountlinks as symlinks.
+ */
+ String CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS =
+ "fs.viewfs.mount.links.as.symlinks";
+
+ boolean CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS_DEFAULT = true;
+
+ /**
+ * When initializing the viewfs, authority will be used as the mount table
+ * name to find the mount link configurations. To make the mount table name
+ * unique, we may want to ignore the port if the initializing uri's authority
+ * contains a port number. By default, ViewFileSystem considers the port
+ * number as well (the default value is false so that existing deployments
+ * continue with the current behavior).
+ */
+ String CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME =
+ "fs.viewfs.ignore.port.in.mount.table.name";
+
+ boolean CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME_DEFAULT = false;
}
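
An illustrative snippet toggling the two new keys; the chosen values simply invert the documented ViewFileSystem defaults.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.viewfs.Constants;

public class ViewFsFlagsSketch {
  public static Configuration tune(Configuration conf) {
    // Show mount points with the attributes of their resolved targets
    // instead of as symlinks (the ViewFileSystem default is true).
    conf.setBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, false);
    // Drop the port from viewfs://host:port/ when picking the mount table
    // name (the default is false to keep existing deployments working).
    conf.setBoolean(
        Constants.CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME, true);
    return conf;
  }
}
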
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
index 50c839b52b654..422e7337b57fb 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
@@ -34,6 +34,7 @@
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.security.UserGroupInformation;
@@ -67,7 +68,7 @@ enum ResultKind {
// the root of the mount table
private final INode root;
// the fallback filesystem
- private final INodeLink<T> rootFallbackLink;
+ private INodeLink<T> rootFallbackLink;
// the homedir for this mount table
private final String homedirPrefix;
private List<MountPoint<T>> mountPoints = new ArrayList<MountPoint<T>>();
@@ -374,7 +375,7 @@ protected abstract T getTargetFileSystem(URI uri)
throws UnsupportedFileSystemException, URISyntaxException, IOException;
protected abstract T getTargetFileSystem(INodeDir<T> dir)
- throws URISyntaxException;
+ throws URISyntaxException, IOException;
protected abstract T getTargetFileSystem(String settings, URI[] mergeFsURIs)
throws UnsupportedFileSystemException, URISyntaxException, IOException;
@@ -393,7 +394,7 @@ private boolean hasFallbackLink() {
return rootFallbackLink != null;
}
- private INodeLink<T> getRootFallbackLink() {
+ protected INodeLink<T> getRootFallbackLink() {
Preconditions.checkState(root.isInternalDir());
return rootFallbackLink;
}
@@ -460,12 +461,13 @@ Configuration getConfig() {
* @throws FileAlreadyExistsException
* @throws IOException
*/
- protected InodeTree(final Configuration config, final String viewName)
+ protected InodeTree(final Configuration config, final String viewName,
+ final URI theUri, boolean initingUriAsFallbackOnNoMounts)
throws UnsupportedFileSystemException, URISyntaxException,
FileAlreadyExistsException, IOException {
String mountTableName = viewName;
if (mountTableName == null) {
- mountTableName = Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE;
+ mountTableName = ConfigUtil.getDefaultMountTableName(config);
}
homedirPrefix = ConfigUtil.getHomeDirValue(config, mountTableName);
@@ -596,9 +598,19 @@ protected InodeTree(final Configuration config, final String viewName)
}
if (!gotMountTableEntry) {
- throw new IOException(
- "ViewFs: Cannot initialize: Empty Mount table in config for " +
- "viewfs://" + mountTableName + "/");
+ if (!initingUriAsFallbackOnNoMounts) {
+ throw new IOException(
+ "ViewFs: Cannot initialize: Empty Mount table in config for "
+ + "viewfs://" + mountTableName + "/");
+ }
+ StringBuilder msg =
+ new StringBuilder("Empty mount table detected for ").append(theUri)
+ .append(" and considering itself as a linkFallback.");
+ FileSystem.LOG.info(msg.toString());
+ rootFallbackLink =
+ new INodeLink<T>(mountTableName, ugi, getTargetFileSystem(theUri),
+ theUri);
+ getRootDir().addFallbackLink(rootFallbackLink);
}
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
index 4f02feeebec8b..1fc531e05635d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
@@ -20,6 +20,10 @@
import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs;
import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_ENABLE_INNER_CACHE;
import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_ENABLE_INNER_CACHE_DEFAULT;
+import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME;
+import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME_DEFAULT;
+import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS;
+import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS_DEFAULT;
import static org.apache.hadoop.fs.viewfs.Constants.PERMISSION_555;
import java.io.FileNotFoundException;
@@ -39,6 +43,7 @@
import java.util.Objects;
import java.util.Set;
+import com.google.common.base.Preconditions;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -253,6 +258,14 @@ public String getScheme() {
return FsConstants.VIEWFS_SCHEME;
}
+ /**
+ * Returns the ViewFileSystem type.
+ * @return viewfs
+ */
+ String getType() {
+ return FsConstants.VIEWFS_TYPE;
+ }
+
/**
* Called after a new FileSystem instance is constructed.
* @param theUri a uri whose authority section names the host, port, etc. for
@@ -271,9 +284,18 @@ public void initialize(final URI theUri, final Configuration conf)
final InnerCache innerCache = new InnerCache(fsGetter);
// Now build client side view (i.e. client side mount table) from config.
final String authority = theUri.getAuthority();
+ String tableName = authority;
+ if (theUri.getPort() != -1 && config
+ .getBoolean(CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME,
+ CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME_DEFAULT)) {
+ tableName = theUri.getHost();
+ }
try {
myUri = new URI(getScheme(), authority, "/", null, null);
- fsState = new InodeTree<FileSystem>(conf, authority) {
+ boolean initingUriAsFallbackOnNoMounts =
+ !FsConstants.VIEWFS_TYPE.equals(getType());
+ fsState = new InodeTree<FileSystem>(conf, tableName, theUri,
+ initingUriAsFallbackOnNoMounts) {
@Override
protected FileSystem getTargetFileSystem(final URI uri)
throws URISyntaxException, IOException {
@@ -288,8 +310,9 @@ protected FileSystem getTargetFileSystem(final URI uri)
@Override
protected FileSystem getTargetFileSystem(final INodeDir<FileSystem> dir)
- throws URISyntaxException {
- return new InternalDirOfViewFs(dir, creationTime, ugi, myUri, config);
+ throws URISyntaxException {
+ return new InternalDirOfViewFs(dir, creationTime, ugi, myUri, config,
+ this);
}
@Override
@@ -488,6 +511,14 @@ private static FileStatus wrapLocalFileStatus(FileStatus orig,
: new ViewFsFileStatus(orig, qualified);
}
+ /**
+ * {@inheritDoc}
+ *
+ * If the given path is a symlink (mount link), the path will be resolved to
+ * its target and the resolved path's FileStatus object is returned. It will
+ * not be represented as a symlink, and the isDirectory API returns true if
+ * the resolved path is a directory, false otherwise.
+ */
@Override
public FileStatus getFileStatus(final Path f) throws AccessControlException,
FileNotFoundException, IOException {
@@ -505,6 +536,33 @@ public void access(Path path, FsAction mode) throws AccessControlException,
res.targetFileSystem.access(res.remainingPath, mode);
}
+ /**
+ * {@inheritDoc}
+ *
+ * Note: listStatus considers listing from the fallbackLink if available. If
+ * the same directory path is present in a configured mount path as well as
+ * in the fallback fs, then only the fallback path will be listed in the
+ * returned result, except for links.
+ *
+ * If any of the immediate children of the given path f is a symlink (mount
+ * link), the returned FileStatus object for that child will be represented
+ * as a symlink. It will not be resolved to the target path, and the target
+ * path's FileStatus object will not be fetched. The target path is available
+ * via getSymlink on that child's FileStatus object. Since it is represented
+ * as a symlink, isDirectory on that child's FileStatus will return false.
+ * This behavior can be changed by setting the advanced configuration
+ * fs.viewfs.mount.links.as.symlinks to false. In this case, mount points
+ * will be represented as non-symlinks, and all the file/directory attributes
+ * such as permissions and isDirectory will be taken from their resolved
+ * target directory/file.
+ *
+ * If you want the FileStatus of the target path for such a child, use the
+ * getFileStatus API with that child's symlink path. Please see
+ * {@link ViewFileSystem#getFileStatus(Path f)}
+ *
+ * Note: In ViewFileSystem, by default the mount links are represented as
+ * symlinks.
+ */
@Override
public FileStatus[] listStatus(final Path f) throws AccessControlException,
FileNotFoundException, IOException {
@@ -1087,11 +1145,14 @@ static class InternalDirOfViewFs extends FileSystem {
final long creationTime; // of the the mount table
final UserGroupInformation ugi; // the user/group of user who created mtable
final URI myUri;
+ private final boolean showMountLinksAsSymlinks;
+ private InodeTree<FileSystem> fsState;
public InternalDirOfViewFs(final InodeTree.INodeDir<FileSystem> dir,
final long cTime, final UserGroupInformation ugi, URI uri,
- Configuration config) throws URISyntaxException {
+ Configuration config, InodeTree<FileSystem> fsState) throws URISyntaxException {
myUri = uri;
+ this.fsState = fsState;
try {
initialize(myUri, config);
} catch (IOException e) {
@@ -1100,6 +1161,9 @@ public InternalDirOfViewFs(final InodeTree.INodeDir dir,
theInternalDir = dir;
creationTime = cTime;
this.ugi = ugi;
+ showMountLinksAsSymlinks = config
+ .getBoolean(CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS,
+ CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS_DEFAULT);
}
static private void checkPathIsSlash(final Path f) throws IOException {
@@ -1136,7 +1200,41 @@ public FSDataOutputStream append(final Path f, final int bufferSize,
public FSDataOutputStream create(final Path f,
final FsPermission permission, final boolean overwrite,
final int bufferSize, final short replication, final long blockSize,
- final Progressable progress) throws AccessControlException {
+ final Progressable progress) throws IOException {
+ Preconditions.checkNotNull(f, "File cannot be null.");
+ if (InodeTree.SlashPath.equals(f)) {
+ throw new FileAlreadyExistsException(
+ "/ is not a file. The directory / already exist at: "
+ + theInternalDir.fullPath);
+ }
+
+ if (this.fsState.getRootFallbackLink() != null) {
+
+ if (theInternalDir.getChildren().containsKey(f.getName())) {
+ throw new FileAlreadyExistsException(
+ "A mount path(file/dir) already exist with the requested path: "
+ + theInternalDir.getChildren().get(f.getName()).fullPath);
+ }
+
+ FileSystem linkedFallbackFs =
+ this.fsState.getRootFallbackLink().getTargetFileSystem();
+ Path parent = Path.getPathWithoutSchemeAndAuthority(
+ new Path(theInternalDir.fullPath));
+ String leaf = f.getName();
+ Path fileToCreate = new Path(parent, leaf);
+
+ try {
+ return linkedFallbackFs
+ .create(fileToCreate, permission, overwrite, bufferSize,
+ replication, blockSize, progress);
+ } catch (IOException e) {
+ StringBuilder msg =
+ new StringBuilder("Failed to create file:").append(fileToCreate)
+ .append(" at fallback : ").append(linkedFallbackFs.getUri());
+ LOG.error(msg.toString(), e);
+ throw e;
+ }
+ }
throw readOnlyMountTable("create", f);
}
@@ -1174,86 +1272,151 @@ public FileStatus getFileStatus(Path f) throws IOException {
checkPathIsSlash(f);
return new FileStatus(0, true, 0, 0, creationTime, creationTime,
PERMISSION_555, ugi.getShortUserName(), ugi.getPrimaryGroupName(),
-
new Path(theInternalDir.fullPath).makeQualified(
myUri, ROOT_PATH));
}
- /**
- * {@inheritDoc}
- *
- * Note: listStatus on root("/") considers listing from fallbackLink if
- * available. If the same directory name is present in configured mount
- * path as well as in fallback link, then only the configured mount path
- * will be listed in the returned result.
- */
@Override
public FileStatus[] listStatus(Path f) throws AccessControlException,
FileNotFoundException, IOException {
checkPathIsSlash(f);
FileStatus[] fallbackStatuses = listStatusForFallbackLink();
- FileStatus[] result = new FileStatus[theInternalDir.getChildren().size()];
+ Set<FileStatus> linkStatuses = new HashSet<>();
+ Set<FileStatus> internalDirStatuses = new HashSet<>();
int i = 0;
for (Entry<String, INode<FileSystem>> iEntry :
theInternalDir.getChildren().entrySet()) {
INode<FileSystem> inode = iEntry.getValue();
+ Path path = new Path(inode.fullPath).makeQualified(myUri, null);
if (inode.isLink()) {
INodeLink<FileSystem> link = (INodeLink<FileSystem>) inode;
- result[i++] = new FileStatus(0, false, 0, 0,
- creationTime, creationTime, PERMISSION_555,
- ugi.getShortUserName(), ugi.getPrimaryGroupName(),
- link.getTargetLink(),
- new Path(inode.fullPath).makeQualified(
- myUri, null));
+ if (showMountLinksAsSymlinks) {
+ // To maintain backward compatibility with the default option (showing
+ // mount links as symlinks), we represent the target link as a symlink;
+ // all other properties belong to the mount link only.
+ linkStatuses.add(
+ new FileStatus(0, false, 0, 0, creationTime, creationTime,
+ PERMISSION_555, ugi.getShortUserName(),
+ ugi.getPrimaryGroupName(), link.getTargetLink(), path));
+ continue;
+ }
+
+ // We will represent the entry as a non-symlink. It shows the target
+ // directory/file properties (permissions, isDirectory, etc.) on the
+ // mount path. The path will be the mount link path, and isDirectory is
+ // true if the target is a directory, false otherwise.
+ String linkedPath = link.getTargetFileSystem().getUri().getPath();
+ if ("".equals(linkedPath)) {
+ linkedPath = "/";
+ }
+ try {
+ FileStatus status =
+ ((ChRootedFileSystem)link.getTargetFileSystem())
+ .getMyFs().getFileStatus(new Path(linkedPath));
+ linkStatuses.add(
+ new FileStatus(status.getLen(), status.isDirectory(),
+ status.getReplication(), status.getBlockSize(),
+ status.getModificationTime(), status.getAccessTime(),
+ status.getPermission(), status.getOwner(),
+ status.getGroup(), null, path));
+ } catch (FileNotFoundException ex) {
+ LOG.warn("Cannot get one of the children's(" + path
+ + ") target path(" + link.getTargetFileSystem().getUri()
+ + ") file status.", ex);
+ throw ex;
+ }
} else {
- result[i++] = new FileStatus(0, true, 0, 0,
- creationTime, creationTime, PERMISSION_555,
- ugi.getShortUserName(), ugi.getGroupNames()[0],
- new Path(inode.fullPath).makeQualified(
- myUri, null));
+ internalDirStatuses.add(
+ new FileStatus(0, true, 0, 0, creationTime, creationTime,
+ PERMISSION_555, ugi.getShortUserName(),
+ ugi.getPrimaryGroupName(), path));
}
}
+ FileStatus[] internalDirStatusesMergedWithFallBack = internalDirStatuses
+ .toArray(new FileStatus[internalDirStatuses.size()]);
if (fallbackStatuses.length > 0) {
- return consolidateFileStatuses(fallbackStatuses, result);
- } else {
- return result;
+ internalDirStatusesMergedWithFallBack =
+ merge(fallbackStatuses, internalDirStatusesMergedWithFallBack);
}
+ // Links always take precedence over internalDir or fallback paths.
+ return merge(linkStatuses.toArray(new FileStatus[linkStatuses.size()]),
+ internalDirStatusesMergedWithFallBack);
}
- private FileStatus[] consolidateFileStatuses(FileStatus[] fallbackStatuses,
- FileStatus[] mountPointStatuses) {
+ private FileStatus[] merge(FileStatus[] toStatuses,
+ FileStatus[] fromStatuses) {
ArrayList<FileStatus> result = new ArrayList<>();
Set<String> pathSet = new HashSet<>();
- for (FileStatus status : mountPointStatuses) {
+ for (FileStatus status : toStatuses) {
result.add(status);
pathSet.add(status.getPath().getName());
}
- for (FileStatus status : fallbackStatuses) {
+ for (FileStatus status : fromStatuses) {
if (!pathSet.contains(status.getPath().getName())) {
result.add(status);
}
}
- return result.toArray(new FileStatus[0]);
+ return result.toArray(new FileStatus[result.size()]);
}
private FileStatus[] listStatusForFallbackLink() throws IOException {
- if (theInternalDir.isRoot() &&
- theInternalDir.getFallbackLink() != null) {
- FileSystem linkedFs =
- theInternalDir.getFallbackLink().getTargetFileSystem();
- // Fallback link is only applicable for root
- FileStatus[] statuses = linkedFs.listStatus(new Path("/"));
- for (FileStatus status : statuses) {
- // Fix the path back to viewfs scheme
- status.setPath(
- new Path(myUri.toString(), status.getPath().getName()));
+ if (this.fsState.getRootFallbackLink() != null) {
+ FileSystem linkedFallbackFs =
+ this.fsState.getRootFallbackLink().getTargetFileSystem();
+ Path p = Path.getPathWithoutSchemeAndAuthority(
+ new Path(theInternalDir.fullPath));
+ if (theInternalDir.isRoot() || linkedFallbackFs.exists(p)) {
+ FileStatus[] statuses = linkedFallbackFs.listStatus(p);
+ for (FileStatus status : statuses) {
+ // Fix the path back to viewfs scheme
+ Path pathFromConfiguredFallbackRoot =
+ new Path(p, status.getPath().getName());
+ status.setPath(
+ new Path(myUri.toString(), pathFromConfiguredFallbackRoot));
+ }
+ return statuses;
}
- return statuses;
- } else {
- return new FileStatus[0];
}
+ return new FileStatus[0];
+ }
+
+ @Override
+ public ContentSummary getContentSummary(Path f) throws IOException {
+ long[] summary = {0, 0, 1};
+ for (FileStatus status : listStatus(f)) {
+ Path targetPath =
+ Path.getPathWithoutSchemeAndAuthority(status.getPath());
+ InodeTree.ResolveResult<FileSystem> res =
+ fsState.resolve(targetPath.toString(), true);
+ ContentSummary child =
+ res.targetFileSystem.getContentSummary(res.remainingPath);
+ summary[0] += child.getLength();
+ summary[1] += child.getFileCount();
+ summary[2] += child.getDirectoryCount();
+ }
+ return new ContentSummary.Builder()
+ .length(summary[0])
+ .fileCount(summary[1])
+ .directoryCount(summary[2])
+ .build();
+ }
+
+ @Override
+ public FsStatus getStatus(Path p) throws IOException {
+ long[] summary = {0, 0, 0};
+ for (FileStatus status : listStatus(p)) {
+ Path targetPath =
+ Path.getPathWithoutSchemeAndAuthority(status.getPath());
+ InodeTree.ResolveResult<FileSystem> res =
+ fsState.resolve(targetPath.toString(), true);
+ FsStatus child = res.targetFileSystem.getStatus(res.remainingPath);
+ summary[0] += child.getCapacity();
+ summary[1] += child.getUsed();
+ summary[2] += child.getRemaining();
+ }
+ return new FsStatus(summary[0], summary[1], summary[2]);
}
@Override
@@ -1267,6 +1430,31 @@ public boolean mkdirs(Path dir, FsPermission permission)
dir.toString().substring(1))) {
return true; // this is the stupid semantics of FileSystem
}
+
+ if (this.fsState.getRootFallbackLink() != null) {
+ FileSystem linkedFallbackFs =
+ this.fsState.getRootFallbackLink().getTargetFileSystem();
+ Path parent = Path.getPathWithoutSchemeAndAuthority(
+ new Path(theInternalDir.fullPath));
+ String leafChild = (InodeTree.SlashPath.equals(dir)) ?
+ InodeTree.SlashPath.toString() :
+ dir.getName();
+ Path dirToCreate = new Path(parent, leafChild);
+
+ try {
+ return linkedFallbackFs.mkdirs(dirToCreate, permission);
+ } catch (IOException e) {
+ if (LOG.isDebugEnabled()) {
+ StringBuilder msg =
+ new StringBuilder("Failed to create ").append(dirToCreate)
+ .append(" at fallback : ")
+ .append(linkedFallbackFs.getUri());
+ LOG.debug(msg.toString(), e);
+ }
+ return false;
+ }
+ }
+
throw readOnlyMountTable("mkdirs", dir);
}
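
A small sketch of what the documented listStatus behavior looks like to a caller; the directory path is illustrative, and whether the symlink or the non-symlink branch is taken depends on fs.viewfs.mount.links.as.symlinks as described above.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListMountLinksSketch {
  public static void printListing(Path viewFsDir) throws IOException {
    FileSystem viewFs = viewFsDir.getFileSystem(new Configuration());
    for (FileStatus st : viewFs.listStatus(viewFsDir)) {
      if (st.isSymlink()) {
        // Default behaviour: a mount link shows up as a symlink whose
        // target is the linked file system URI.
        System.out.println(st.getPath() + " -> " + st.getSymlink());
      } else {
        // With fs.viewfs.mount.links.as.symlinks=false, the entry carries
        // the resolved target's attributes (isDirectory, permissions, ...).
        System.out.println(st.getPath() + " dir=" + st.isDirectory());
      }
    }
  }
}
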
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemOverloadScheme.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemOverloadScheme.java
index 36f9cd104cb6b..2165a3f9ee688 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemOverloadScheme.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemOverloadScheme.java
@@ -31,6 +31,8 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME;
+
/******************************************************************************
* This class is extended from the ViewFileSystem for the overloaded scheme
* file system. Mount link configurations and in-memory mount table
@@ -59,9 +61,9 @@
* data to mount with other hdfs and object store clusters(hdfs://NN1,
* o3fs://bucket1.volume1/, s3a://bucket1/)
*
- * fs.viewfs.mounttable.Cluster./user = hdfs://NN1/user
- * fs.viewfs.mounttable.Cluster./data = o3fs://bucket1.volume1/data
- * fs.viewfs.mounttable.Cluster./backup = s3a://bucket1/backup/
+ * fs.viewfs.mounttable.Cluster.link./user = hdfs://NN1/user
+ * fs.viewfs.mounttable.Cluster.link./data = o3fs://bucket1.volume1/data
+ * fs.viewfs.mounttable.Cluster.link./backup = s3a://bucket1/backup/
*
* Op1: Create file hdfs://Cluster/user/fileA will go to hdfs://NN1/user/fileA
* Op2: Create file hdfs://Cluster/data/datafile will go to
@@ -75,15 +77,28 @@
* data to mount with other hdfs and object store clusters
* (hdfs://NN1, o3fs://bucket1.volume1/)
*
- * fs.viewfs.mounttable.bucketA./user = hdfs://NN1/user
- * fs.viewfs.mounttable.bucketA./data = o3fs://bucket1.volume1/data
- * fs.viewfs.mounttable.bucketA./salesDB = s3a://bucketA/salesDB/
+ * fs.viewfs.mounttable.bucketA.link./user = hdfs://NN1/user
+ * fs.viewfs.mounttable.bucketA.link./data = o3fs://bucket1.volume1/data
+ * fs.viewfs.mounttable.bucketA.link./salesDB = s3a://bucketA/salesDB/
*
* Op1: Create file s3a://bucketA/user/fileA will go to hdfs://NN1/user/fileA
* Op2: Create file s3a://bucketA/data/datafile will go to
* o3fs://bucket1.volume1/data/datafile
* Op3: Create file s3a://bucketA/salesDB/dbfile will go to
* s3a://bucketA/salesDB/dbfile
+ *
+ * Note:
+ * (1) In ViewFileSystemOverloadScheme, by default the mount links will be
+ * represented as non-symlinks. If you want to change this behavior, please see
+ * {@link ViewFileSystem#listStatus(Path)}
+ * (2) In ViewFileSystemOverloadScheme, only the initialized uri's hostname
+ * will be considered as the mount table name. When the passed uri has
+ * hostname:port, the port number is simply ignored and only the hostname is
+ * used as the mount table name.
+ * (3) If there are no mount links configured with the initializing uri's
+ * hostname as the mount table name, then it will automatically consider the
+ * current uri as the fallback (ex: fs.viewfs.mounttable.<mycluster>.linkFallback)
+ * target fs uri.
*****************************************************************************/
@InterfaceAudience.LimitedPrivate({ "MapReduce", "HBase", "Hive" })
@InterfaceStability.Evolving
@@ -98,6 +113,14 @@ public String getScheme() {
return myUri.getScheme();
}
+ /**
+ * Returns the ViewFileSystem type.
+ * @return viewfs
+ */
+ String getType() {
+ return FsConstants.VIEWFSOS_TYPE;
+ }
+
@Override
public void initialize(URI theUri, Configuration conf) throws IOException {
this.myUri = theUri;
@@ -107,6 +130,14 @@ public void initialize(URI theUri, Configuration conf) throws IOException {
}
String mountTableConfigPath =
conf.get(Constants.CONFIG_VIEWFS_MOUNTTABLE_PATH);
+ /* The default value is false in ViewFSOverloadScheme */
+ conf.setBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS,
+ conf.getBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS,
+ false));
+ /* The default value is true in ViewFSOverloadScheme */
+ conf.setBoolean(CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME,
+ conf.getBoolean(Constants.CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME,
+ true));
if (null != mountTableConfigPath) {
MountTableConfigLoader loader = new HCFSMountTableConfigLoader();
loader.load(mountTableConfigPath, conf);
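As a rough illustration of the mount-link configuration described in the class comment above, a client could wire up the overload scheme along the following lines. This is only a sketch: the cluster name, target URIs and the fs.hdfs.impl override are illustrative, and a real deployment may need additional target-filesystem settings that are omitted here.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class OverloadSchemeSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Assumption: overload the hdfs scheme with the viewfs implementation.
        conf.set("fs.hdfs.impl",
            "org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme");
        // Mount links for the mount table named "Cluster" (keys as in the
        // class comment above).
        conf.set("fs.viewfs.mounttable.Cluster.link./user", "hdfs://NN1/user");
        conf.set("fs.viewfs.mounttable.Cluster.link./data",
            "o3fs://bucket1.volume1/data");
        conf.set("fs.viewfs.mounttable.Cluster.link./backup",
            "s3a://bucket1/backup/");

        // "Cluster" (the authority; any port would be ignored) selects the
        // mount table above.
        FileSystem fs = FileSystem.get(new URI("hdfs://Cluster/"), conf);
        fs.create(new Path("/user/fileA")).close(); // lands on hdfs://NN1/user/fileA
      }
    }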
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemUtil.java
index c8a1d78cffd46..f486a10b4c8f9 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemUtil.java
@@ -51,6 +51,17 @@ public static boolean isViewFileSystem(final FileSystem fileSystem) {
return fileSystem.getScheme().equals(FsConstants.VIEWFS_SCHEME);
}
+ /**
+ * Check if the FileSystem is a ViewFileSystemOverloadScheme.
+ *
+ * @param fileSystem the FileSystem to check
+ * @return true if the fileSystem is ViewFileSystemOverloadScheme
+ */
+ public static boolean isViewFileSystemOverloadScheme(
+ final FileSystem fileSystem) {
+ return fileSystem instanceof ViewFileSystemOverloadScheme;
+ }
+
/**
* Get FsStatus for all ViewFsMountPoints matching path for the given
* ViewFileSystem.
@@ -93,7 +104,8 @@ public static boolean isViewFileSystem(final FileSystem fileSystem) {
*/
public static Map getStatus(
FileSystem fileSystem, Path path) throws IOException {
- if (!isViewFileSystem(fileSystem)) {
+ if (!(isViewFileSystem(fileSystem)
+ || isViewFileSystemOverloadScheme(fileSystem))) {
throw new UnsupportedFileSystemException("FileSystem '"
+ fileSystem.getUri() + "'is not a ViewFileSystem.");
}
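A minimal usage sketch of the widened getStatus() guard above, for a FileSystem handle that may be either viewfs flavor; the exact generic types of the returned map are elided here.

    import java.io.IOException;
    import java.util.Map;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.viewfs.ViewFileSystemUtil;

    final class MountStatusSketch {
      // Hypothetical helper: print per-mount-point status for any viewfs flavor.
      static void dumpMountStatus(FileSystem fs) throws IOException {
        if (ViewFileSystemUtil.isViewFileSystem(fs)
            || ViewFileSystemUtil.isViewFileSystemOverloadScheme(fs)) {
          // After this change, getStatus() accepts ViewFileSystemOverloadScheme
          // instances as well as plain viewfs:// file systems.
          Map<?, ?> statuses = ViewFileSystemUtil.getStatus(fs, new Path("/"));
          statuses.forEach((mountPoint, status) ->
              System.out.println(mountPoint + " -> " + status));
        }
      }
    }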
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
index 607bdb8d423a0..95b596bde367d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
@@ -17,6 +17,8 @@
*/
package org.apache.hadoop.fs.viewfs;
+import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS;
+import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS_DEFAULT;
import static org.apache.hadoop.fs.viewfs.Constants.PERMISSION_555;
import java.io.FileNotFoundException;
@@ -31,6 +33,8 @@
import java.util.Map.Entry;
import java.util.Set;
+
+import com.google.common.base.Preconditions;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -42,6 +46,7 @@
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileChecksum;
+import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.FsServerDefaults;
@@ -67,7 +72,8 @@
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.Time;
-
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* ViewFs (extends the AbstractFileSystem interface) implements a client-side
@@ -154,6 +160,7 @@
@InterfaceAudience.Public
@InterfaceStability.Evolving /*Evolving for a release,to be changed to Stable */
public class ViewFs extends AbstractFileSystem {
+ static final Logger LOG = LoggerFactory.getLogger(ViewFs.class);
final long creationTime; // of the the mount table
final UserGroupInformation ugi; // the user/group of user who created mtable
final Configuration config;
@@ -161,6 +168,7 @@ public class ViewFs extends AbstractFileSystem {
Path homeDir = null;
private ViewFileSystem.RenameStrategy renameStrategy =
ViewFileSystem.RenameStrategy.SAME_MOUNTPOINT;
+ private static boolean showMountLinksAsSymlinks = true;
static AccessControlException readOnlyMountTable(final String operation,
final String p) {
@@ -188,7 +196,16 @@ URI[] getTargets() {
return targets;
}
}
-
+
+ /**
+ * Returns the ViewFileSystem type.
+ *
+ * @return viewfs
+ */
+ String getType() {
+ return FsConstants.VIEWFS_TYPE;
+ }
+
public ViewFs(final Configuration conf) throws IOException,
URISyntaxException {
this(FsConstants.VIEWFS_URI, conf);
@@ -209,9 +226,15 @@ public ViewFs(final Configuration conf) throws IOException,
creationTime = Time.now();
ugi = UserGroupInformation.getCurrentUser();
config = conf;
+ showMountLinksAsSymlinks = config
+ .getBoolean(CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS,
+ CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS_DEFAULT);
// Now build client side view (i.e. client side mount table) from config.
String authority = theUri.getAuthority();
- fsState = new InodeTree(conf, authority) {
+ boolean initingUriAsFallbackOnNoMounts =
+ !FsConstants.VIEWFS_TYPE.equals(getType());
+ fsState = new InodeTree(conf, authority, theUri,
+ initingUriAsFallbackOnNoMounts) {
@Override
protected AbstractFileSystem getTargetFileSystem(final URI uri)
@@ -228,7 +251,8 @@ protected AbstractFileSystem getTargetFileSystem(final URI uri)
@Override
protected AbstractFileSystem getTargetFileSystem(
final INodeDir dir) throws URISyntaxException {
- return new InternalDirOfViewFs(dir, creationTime, ugi, getUri());
+ return new InternalDirOfViewFs(dir, creationTime, ugi, getUri(), this,
+ config);
}
@Override
@@ -351,6 +375,14 @@ public FileChecksum getFileChecksum(final Path f)
return res.targetFileSystem.getFileChecksum(res.remainingPath);
}
+ /**
+ * {@inheritDoc}
+ *
+ * If the given path is a symlink (mount link), the path will be resolved to the
+ * target path and the resolved path's FileStatus object will be returned. It
+ * will not be represented as a symlink, and the isDirectory API returns true if
+ * the resolved path is a directory, false otherwise.
+ */
@Override
public FileStatus getFileStatus(final Path f) throws AccessControlException,
FileNotFoundException, UnresolvedLinkException, IOException {
@@ -436,6 +468,32 @@ public LocatedFileStatus getViewFsFileStatus(LocatedFileStatus stat,
};
}
+ /**
+ * {@inheritDoc}
+ *
+ * Note: listStatus considers listing from fallbackLink if available. If the
+ * same directory path is present in the configured mount path as well as in
+ * the fallback fs, then only the fallback path will be listed in the returned
+ * result, except when that path is a mount link.
+ *
+ * If any of the immediate children of the given path f is a symlink (mount
+ * link), the returned FileStatus object of that child will be represented
+ * as a symlink. It will not be resolved to the target path and will not carry
+ * the target path's FileStatus object. The target path will be available via
+ * getSymlink on that child's FileStatus object. Since it is represented as a
+ * symlink, isDirectory on that child's FileStatus will return false.
+ * This behavior can be changed by setting the advanced configuration
+ * fs.viewfs.mount.links.as.symlinks to false. In this case, mount points will
+ * be represented as non-symlinks and all the file/directory attributes like
+ * permissions, isDirectory etc. will be assigned from its resolved target
+ * directory/file.
+ *
+ * If you want to get the FileStatus of the target path for that child, you may
+ * want to use the getFileStatus API with that child's symlink path. Please see
+ * {@link ViewFs#getFileStatus(Path f)}
+ *
+ * Note: In ViewFs, by default the mount links are represented as symlinks.
+ */
@Override
public FileStatus[] listStatus(final Path f) throws AccessControlException,
FileNotFoundException, UnresolvedLinkException, IOException {
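A hedged sketch of the listStatus() contract documented above, using a hypothetical mount link; with the default setting the mount entries come back as symlinks, and flipping fs.viewfs.mount.links.as.symlinks to false makes them carry the resolved target's attributes instead.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileContext;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FsConstants;
    import org.apache.hadoop.fs.Path;

    public class ListStatusSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical mount link for the default mount table.
        conf.set("fs.viewfs.mounttable.default.link./data", "hdfs://nn1/data");
        // Default is true for ViewFs; set false to show resolved attributes.
        conf.setBoolean("fs.viewfs.mount.links.as.symlinks", true);

        FileContext fc = FileContext.getFileContext(FsConstants.VIEWFS_URI, conf);
        for (FileStatus st : fc.util().listStatus(new Path("/"))) {
          // With symlinks enabled, isDirectory() is false for mount entries and
          // getSymlink() holds the target; use getFileStatus() to resolve it.
          System.out.println(st.getPath() + " symlink=" + st.isSymlink()
              + (st.isSymlink() ? " -> " + st.getSymlink() : ""));
        }
      }
    }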
@@ -843,15 +901,20 @@ static class InternalDirOfViewFs extends AbstractFileSystem {
final long creationTime; // of the the mount table
final UserGroupInformation ugi; // the user/group of user who created mtable
final URI myUri; // the URI of the outer ViewFs
-
+ private InodeTree fsState;
+ private Configuration conf;
+
public InternalDirOfViewFs(final InodeTree.INodeDir dir,
- final long cTime, final UserGroupInformation ugi, final URI uri)
+ final long cTime, final UserGroupInformation ugi, final URI uri,
+ InodeTree fsState, Configuration conf)
throws URISyntaxException {
super(FsConstants.VIEWFS_URI, FsConstants.VIEWFS_SCHEME, false, -1);
theInternalDir = dir;
creationTime = cTime;
this.ugi = ugi;
myUri = uri;
+ this.fsState = fsState;
+ this.conf = conf;
}
static private void checkPathIsSlash(final Path f) throws IOException {
@@ -870,6 +933,41 @@ public FSDataOutputStream createInternal(final Path f,
FileAlreadyExistsException, FileNotFoundException,
ParentNotDirectoryException, UnsupportedFileSystemException,
UnresolvedLinkException, IOException {
+ Preconditions.checkNotNull(f, "File cannot be null.");
+ if (InodeTree.SlashPath.equals(f)) {
+ throw new FileAlreadyExistsException(
+ "/ is not a file. The directory / already exist at: "
+ + theInternalDir.fullPath);
+ }
+
+ if (this.fsState.getRootFallbackLink() != null) {
+ if (theInternalDir.getChildren().containsKey(f.getName())) {
+ throw new FileAlreadyExistsException(
+ "A mount path(file/dir) already exist with the requested path: "
+ + theInternalDir.getChildren().get(f.getName()).fullPath);
+ }
+
+ AbstractFileSystem linkedFallbackFs =
+ this.fsState.getRootFallbackLink().getTargetFileSystem();
+ Path parent = Path.getPathWithoutSchemeAndAuthority(
+ new Path(theInternalDir.fullPath));
+ String leaf = f.getName();
+ Path fileToCreate = new Path(parent, leaf);
+
+ try {
+ return linkedFallbackFs
+ .createInternal(fileToCreate, flag, absolutePermission,
+ bufferSize, replication, blockSize, progress, checksumOpt,
+ true);
+ } catch (IOException e) {
+ StringBuilder msg =
+ new StringBuilder("Failed to create file:").append(fileToCreate)
+ .append(" at fallback : ").append(linkedFallbackFs.getUri());
+ LOG.error(msg.toString(), e);
+ throw e;
+ }
+ }
+
throw readOnlyMountTable("create", f);
}
@@ -917,11 +1015,25 @@ public FileStatus getFileLinkStatus(final Path f)
if (inode.isLink()) {
INodeLink inodelink =
(INodeLink) inode;
- result = new FileStatus(0, false, 0, 0, creationTime, creationTime,
+ try {
+ String linkedPath = inodelink.getTargetFileSystem()
+ .getUri().getPath();
+ FileStatus status = ((ChRootedFs)inodelink.getTargetFileSystem())
+ .getMyFs().getFileStatus(new Path(linkedPath));
+ result = new FileStatus(status.getLen(), false,
+ status.getReplication(), status.getBlockSize(),
+ status.getModificationTime(), status.getAccessTime(),
+ status.getPermission(), status.getOwner(), status.getGroup(),
+ inodelink.getTargetLink(),
+ new Path(inode.fullPath).makeQualified(
+ myUri, null));
+ } catch (FileNotFoundException ex) {
+ result = new FileStatus(0, false, 0, 0, creationTime, creationTime,
PERMISSION_555, ugi.getShortUserName(), ugi.getPrimaryGroupName(),
inodelink.getTargetLink(),
new Path(inode.fullPath).makeQualified(
myUri, null));
+ }
} else {
result = new FileStatus(0, true, 0, 0, creationTime, creationTime,
PERMISSION_555, ugi.getShortUserName(), ugi.getPrimaryGroupName(),
@@ -961,83 +1073,145 @@ public int getUriDefaultPort() {
* will be listed in the returned result.
*/
@Override
- public FileStatus[] listStatus(final Path f) throws AccessControlException,
- IOException {
+ public FileStatus[] listStatus(final Path f) throws IOException {
checkPathIsSlash(f);
FileStatus[] fallbackStatuses = listStatusForFallbackLink();
- FileStatus[] result = new FileStatus[theInternalDir.getChildren().size()];
+ Set<FileStatus> linkStatuses = new HashSet<>();
+ Set<FileStatus> internalDirStatuses = new HashSet<>();
int i = 0;
for (Entry> iEntry :
theInternalDir.getChildren().entrySet()) {
INode inode = iEntry.getValue();
-
-
+ Path path = new Path(inode.fullPath).makeQualified(myUri, null);
if (inode.isLink()) {
INodeLink link =
(INodeLink) inode;
- result[i++] = new FileStatus(0, false, 0, 0,
- creationTime, creationTime,
- PERMISSION_555, ugi.getShortUserName(), ugi.getPrimaryGroupName(),
- link.getTargetLink(),
- new Path(inode.fullPath).makeQualified(
- myUri, null));
+ if (showMountLinksAsSymlinks) {
+ // To maintain backward compatibility with the default option (showing
+ // mount links as symlinks), we represent the target link as a symlink;
+ // all other properties belong to the mount link only.
+ linkStatuses.add(
+ new FileStatus(0, false, 0, 0, creationTime, creationTime,
+ PERMISSION_555, ugi.getShortUserName(),
+ ugi.getPrimaryGroupName(), link.getTargetLink(), path));
+ continue;
+ }
+
+ // We will represent the entry as a non-symlink. It shows the target
+ // directory/file properties like permissions, isDirectory etc. on the
+ // mount path. The path will be the mount link path and isDirectory is
+ // true if the target is a directory, otherwise false.
+ String linkedPath = link.getTargetFileSystem().getUri().getPath();
+ if ("".equals(linkedPath)) {
+ linkedPath = "/";
+ }
+ try {
+ FileStatus status =
+ ((ChRootedFs) link.getTargetFileSystem()).getMyFs()
+ .getFileStatus(new Path(linkedPath));
+ linkStatuses.add(
+ new FileStatus(status.getLen(), status.isDirectory(),
+ status.getReplication(), status.getBlockSize(),
+ status.getModificationTime(), status.getAccessTime(),
+ status.getPermission(), status.getOwner(),
+ status.getGroup(), null, path));
+ } catch (FileNotFoundException ex) {
+ LOG.warn("Cannot get one of the children's(" + path
+ + ") target path(" + link.getTargetFileSystem().getUri()
+ + ") file status.", ex);
+ throw ex;
+ }
} else {
- result[i++] = new FileStatus(0, true, 0, 0,
- creationTime, creationTime,
- PERMISSION_555, ugi.getShortUserName(), ugi.getGroupNames()[0],
- new Path(inode.fullPath).makeQualified(
- myUri, null));
+ internalDirStatuses.add(
+ new FileStatus(0, true, 0, 0, creationTime, creationTime,
+ PERMISSION_555, ugi.getShortUserName(),
+ ugi.getPrimaryGroupName(), path));
}
}
+
+ FileStatus[] internalDirStatusesMergedWithFallBack = internalDirStatuses
+ .toArray(new FileStatus[internalDirStatuses.size()]);
if (fallbackStatuses.length > 0) {
- return consolidateFileStatuses(fallbackStatuses, result);
- } else {
- return result;
+ internalDirStatusesMergedWithFallBack =
+ merge(fallbackStatuses, internalDirStatusesMergedWithFallBack);
}
+
+ // Links always take precedence over internalDir or fallback paths.
+ return merge(linkStatuses.toArray(new FileStatus[linkStatuses.size()]),
+ internalDirStatusesMergedWithFallBack);
}
- private FileStatus[] consolidateFileStatuses(FileStatus[] fallbackStatuses,
- FileStatus[] mountPointStatuses) {
+ private FileStatus[] merge(FileStatus[] toStatuses,
+ FileStatus[] fromStatuses) {
ArrayList result = new ArrayList<>();
Set pathSet = new HashSet<>();
- for (FileStatus status : mountPointStatuses) {
+ for (FileStatus status : toStatuses) {
result.add(status);
pathSet.add(status.getPath().getName());
}
- for (FileStatus status : fallbackStatuses) {
+ for (FileStatus status : fromStatuses) {
if (!pathSet.contains(status.getPath().getName())) {
result.add(status);
}
}
- return result.toArray(new FileStatus[0]);
+ return result.toArray(new FileStatus[result.size()]);
}
private FileStatus[] listStatusForFallbackLink() throws IOException {
- if (theInternalDir.isRoot() &&
- theInternalDir.getFallbackLink() != null) {
- AbstractFileSystem linkedFs =
- theInternalDir.getFallbackLink().getTargetFileSystem();
- // Fallback link is only applicable for root
- FileStatus[] statuses = linkedFs.listStatus(new Path("/"));
- for (FileStatus status : statuses) {
- // Fix the path back to viewfs scheme
- status.setPath(
- new Path(myUri.toString(), status.getPath().getName()));
+ if (fsState.getRootFallbackLink() != null) {
+ AbstractFileSystem linkedFallbackFs =
+ fsState.getRootFallbackLink().getTargetFileSystem();
+ Path p = Path.getPathWithoutSchemeAndAuthority(
+ new Path(theInternalDir.fullPath));
+ if (theInternalDir.isRoot() || FileContext
+ .getFileContext(linkedFallbackFs, conf).util().exists(p)) {
+ // List from the fallback if this is the root or the path also exists
+ // in the fallback fs.
+ FileStatus[] statuses = linkedFallbackFs.listStatus(p);
+ for (FileStatus status : statuses) {
+ // Fix the path back to viewfs scheme
+ Path pathFromConfiguredFallbackRoot =
+ new Path(p, status.getPath().getName());
+ status.setPath(
+ new Path(myUri.toString(), pathFromConfiguredFallbackRoot));
+ }
+ return statuses;
}
- return statuses;
- } else {
- return new FileStatus[0];
}
+ return new FileStatus[0];
}
@Override
public void mkdir(final Path dir, final FsPermission permission,
- final boolean createParent) throws AccessControlException,
- FileAlreadyExistsException {
+ final boolean createParent) throws IOException {
if (theInternalDir.isRoot() && dir == null) {
throw new FileAlreadyExistsException("/ already exits");
}
+
+ if (this.fsState.getRootFallbackLink() != null) {
+ AbstractFileSystem linkedFallbackFs =
+ this.fsState.getRootFallbackLink().getTargetFileSystem();
+ Path parent = Path.getPathWithoutSchemeAndAuthority(
+ new Path(theInternalDir.fullPath));
+ String leafChild = (InodeTree.SlashPath.equals(dir)) ?
+ InodeTree.SlashPath.toString() :
+ dir.getName();
+ Path dirToCreate = new Path(parent, leafChild);
+ try {
+ // We are here because the parent dir already exists in the mount
+ // table internal tree. So always create the parent in the fallback.
+ linkedFallbackFs.mkdir(dirToCreate, permission, true);
+ return;
+ } catch (IOException e) {
+ if (LOG.isDebugEnabled()) {
+ StringBuilder msg = new StringBuilder("Failed to create {}")
+ .append(" at fallback fs : {}");
+ LOG.debug(msg.toString(), dirToCreate, linkedFallbackFs.getUri());
+ }
+ throw e;
+ }
+ }
+
throw readOnlyMountTable("mkdir", dir);
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
index 4fc52d557cf9d..5ad71f373f2d0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
@@ -213,7 +213,7 @@ public void failover(HAServiceTarget fromSvc,
// Fence fromSvc if it's required or forced by the user
if (tryFence) {
- if (!fromSvc.getFencer().fence(fromSvc)) {
+ if (!fromSvc.getFencer().fence(fromSvc, toSvc)) {
throw new FailoverFailedException("Unable to fence " +
fromSvc + ". Fencing failed.");
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
index 0950ea7e01c57..34e37650ade1c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
@@ -19,9 +19,9 @@
import java.io.IOException;
import java.io.PrintStream;
-import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
+import java.util.Collections;
import java.util.Map;
import org.apache.commons.cli.Options;
@@ -107,8 +107,7 @@ protected HAAdmin(Configuration conf) {
protected abstract HAServiceTarget resolveTarget(String string);
protected Collection getTargetIds(String targetNodeToActivate) {
- return new ArrayList(
- Arrays.asList(new String[]{targetNodeToActivate}));
+ return Collections.singleton(targetNodeToActivate);
}
protected String getUsageString() {
@@ -188,8 +187,10 @@ private int transitionToActive(final CommandLine cmd)
private boolean isOtherTargetNodeActive(String targetNodeToActivate, boolean forceActive)
throws IOException {
Collection targetIds = getTargetIds(targetNodeToActivate);
- targetIds.remove(targetNodeToActivate);
- for(String targetId : targetIds) {
+ for (String targetId : targetIds) {
+ if (targetNodeToActivate.equals(targetId)) {
+ continue;
+ }
HAServiceTarget target = resolveTarget(targetId);
if (!checkManualStateManagementOK(target)) {
return true;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceTarget.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceTarget.java
index 9d5c8e7b7ea3b..ff9658f1bbc03 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceTarget.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceTarget.java
@@ -44,6 +44,12 @@ public abstract class HAServiceTarget {
private static final String PORT_SUBST_KEY = "port";
private static final String ADDRESS_SUBST_KEY = "address";
+ /**
+ * The HA state this service target is intended to be in after the
+ * transition is complete.
+ */
+ private HAServiceProtocol.HAServiceState transitionTargetHAStatus;
+
/**
* @return the IPC address of the target node.
*/
@@ -93,6 +99,15 @@ public HAServiceProtocol getProxy(Configuration conf, int timeoutMs)
return getProxyForAddress(conf, timeoutMs, getAddress());
}
+ public void setTransitionTargetHAStatus(
+ HAServiceProtocol.HAServiceState status) {
+ this.transitionTargetHAStatus = status;
+ }
+
+ public HAServiceProtocol.HAServiceState getTransitionTargetHAStatus() {
+ return this.transitionTargetHAStatus;
+ }
+
/**
* Returns a proxy to connect to the target HA service for health monitoring.
* If {@link #getHealthMonitorAddress()} is implemented to return a non-null
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java
index 64e7315130257..b0cead56ac0e7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java
@@ -89,15 +89,32 @@ public static NodeFencer create(Configuration conf, String confKey)
}
public boolean fence(HAServiceTarget fromSvc) {
+ return fence(fromSvc, null);
+ }
+
+ public boolean fence(HAServiceTarget fromSvc, HAServiceTarget toSvc) {
LOG.info("====== Beginning Service Fencing Process... ======");
int i = 0;
for (FenceMethodWithArg method : methods) {
LOG.info("Trying method " + (++i) + "/" + methods.size() +": " + method);
try {
- if (method.method.tryFence(fromSvc, method.arg)) {
- LOG.info("====== Fencing successful by method " + method + " ======");
- return true;
+ // only true when target node is given, AND fencing on it failed
+ boolean toSvcFencingFailed = false;
+ // If a target is given, try to fence the target first. Only if fencing
+ // the target succeeds do we fence the source node.
+ if (toSvc != null) {
+ toSvcFencingFailed = !method.method.tryFence(toSvc, method.arg);
+ }
+ if (toSvcFencingFailed) {
+ LOG.error("====== Fencing on target failed, skipping fencing "
+ + "on source ======");
+ } else {
+ if (method.method.tryFence(fromSvc, method.arg)) {
+ LOG.info("====== Fencing successful by method "
+ + method + " ======");
+ return true;
+ }
}
} catch (BadFencingConfigurationException e) {
LOG.error("Fencing method " + method + " misconfigured", e);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ShellCommandFencer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ShellCommandFencer.java
index 7e4a88f729fad..6363063abf2e2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ShellCommandFencer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ShellCommandFencer.java
@@ -19,6 +19,7 @@
import java.io.IOException;
import java.lang.reflect.Field;
+import java.util.Arrays;
import java.util.Map;
import org.apache.hadoop.conf.Configured;
@@ -60,6 +61,11 @@ public class ShellCommandFencer
/** Prefix for target parameters added to the environment */
private static final String TARGET_PREFIX = "target_";
+ /** Prefix for source parameters added to the environment */
+ private static final String SOURCE_PREFIX = "source_";
+
+ private static final String ARG_DELIMITER = ",";
+
@VisibleForTesting
static Logger LOG = LoggerFactory.getLogger(ShellCommandFencer.class);
@@ -73,8 +79,9 @@ public void checkArgs(String args) throws BadFencingConfigurationException {
}
@Override
- public boolean tryFence(HAServiceTarget target, String cmd) {
+ public boolean tryFence(HAServiceTarget target, String args) {
ProcessBuilder builder;
+ String cmd = parseArgs(target.getTransitionTargetHAStatus(), args);
if (!Shell.WINDOWS) {
builder = new ProcessBuilder("bash", "-e", "-c", cmd);
@@ -127,6 +134,28 @@ public boolean tryFence(HAServiceTarget target, String cmd) {
return rc == 0;
}
+ private String parseArgs(HAServiceProtocol.HAServiceState state,
+ String cmd) {
+ String[] args = cmd.split(ARG_DELIMITER);
+ if (args.length == 1) {
+ // Only one command is given; assume both source and destination
+ // will execute the same command/script.
+ return args[0];
+ }
+ if (args.length > 2) {
+ throw new IllegalArgumentException("Expecting arguments size of at most "
+ + "two, getting " + Arrays.asList(args));
+ }
+ if (HAServiceProtocol.HAServiceState.ACTIVE.equals(state)) {
+ return args[0];
+ } else if (HAServiceProtocol.HAServiceState.STANDBY.equals(state)) {
+ return args[1];
+ } else {
+ throw new IllegalArgumentException(
+ "Unexpected HA service state:" + state);
+ }
+ }
+
/**
* Abbreviate a string by putting '...' in the middle of it,
* in an attempt to keep logs from getting too messy.
@@ -190,9 +219,24 @@ private void setConfAsEnvVars(Map env) {
*/
private void addTargetInfoAsEnvVars(HAServiceTarget target,
Map environment) {
+ String prefix;
+ HAServiceProtocol.HAServiceState targetState =
+ target.getTransitionTargetHAStatus();
+ if (targetState == null ||
+ HAServiceProtocol.HAServiceState.ACTIVE.equals(targetState)) {
+ // null is assumed to be the same as ACTIVE; this keeps compatibility
+ // with existing tests/use cases where the target state is not specified
+ // but is assumed to be active.
+ prefix = TARGET_PREFIX;
+ } else if (HAServiceProtocol.HAServiceState.STANDBY.equals(targetState)) {
+ prefix = SOURCE_PREFIX;
+ } else {
+ throw new IllegalArgumentException(
+ "Unexpected HA service state:" + targetState);
+ }
for (Map.Entry e :
target.getFencingParameters().entrySet()) {
- String key = TARGET_PREFIX + e.getKey();
+ String key = prefix + e.getKey();
key = key.replace('.', '_');
environment.put(key, e.getValue());
}
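To tie the two changes in this file together, a hedged sketch with hypothetical script paths: parseArgs() picks the first comma-separated command when the fenced target is transitioning to ACTIVE and the second when it is transitioning to STANDBY, while addTargetInfoAsEnvVars() switches the environment prefix between target_ and source_ for the same states.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ha.HAServiceProtocol;
    import org.apache.hadoop.ha.HAServiceTarget;
    import org.apache.hadoop.ha.ShellCommandFencer;

    final class ShellFencerSketch {
      // Hypothetical wiring: one script for the node becoming active, another
      // for the node being demoted to standby.
      static void fenceBoth(HAServiceTarget toSvc, HAServiceTarget fromSvc) {
        ShellCommandFencer fencer = new ShellCommandFencer();
        fencer.setConf(new Configuration());
        String args = "/opt/ha/fence-active.sh,/opt/ha/fence-standby.sh";

        toSvc.setTransitionTargetHAStatus(
            HAServiceProtocol.HAServiceState.ACTIVE);
        fencer.tryFence(toSvc, args);   // runs fence-active.sh, target_* env vars

        fromSvc.setTransitionTargetHAStatus(
            HAServiceProtocol.HAServiceState.STANDBY);
        fencer.tryFence(fromSvc, args); // runs fence-standby.sh, source_* env vars
      }
    }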
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFCRpcServer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFCRpcServer.java
index 61ea53c420ab1..09161c745dc06 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFCRpcServer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFCRpcServer.java
@@ -28,7 +28,7 @@
import org.apache.hadoop.ha.proto.ZKFCProtocolProtos.ZKFCProtocolService;
import org.apache.hadoop.ha.protocolPB.ZKFCProtocolPB;
import org.apache.hadoop.ha.protocolPB.ZKFCProtocolServerSideTranslatorPB;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.ProtobufRpcEngine2;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RPC.Server;
import org.apache.hadoop.security.AccessControlException;
@@ -51,7 +51,7 @@ public class ZKFCRpcServer implements ZKFCProtocol {
this.zkfc = zkfc;
RPC.setProtocolEngine(conf, ZKFCProtocolPB.class,
- ProtobufRpcEngine.class);
+ ProtobufRpcEngine2.class);
ZKFCProtocolServerSideTranslatorPB translator =
new ZKFCProtocolServerSideTranslatorPB(this);
BlockingService service = ZKFCProtocolService
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolClientSideTranslatorPB.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolClientSideTranslatorPB.java
index e53820cd13107..2cbfd0d0ec030 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolClientSideTranslatorPB.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolClientSideTranslatorPB.java
@@ -38,7 +38,7 @@
import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto;
import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto;
import org.apache.hadoop.ipc.ProtobufHelper;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.ProtobufRpcEngine2;
import org.apache.hadoop.ipc.ProtocolTranslator;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.security.UserGroupInformation;
@@ -67,7 +67,7 @@ public class HAServiceProtocolClientSideTranslatorPB implements
public HAServiceProtocolClientSideTranslatorPB(InetSocketAddress addr,
Configuration conf) throws IOException {
RPC.setProtocolEngine(conf, HAServiceProtocolPB.class,
- ProtobufRpcEngine.class);
+ ProtobufRpcEngine2.class);
rpcProxy = RPC.getProxy(HAServiceProtocolPB.class,
RPC.getProtocolVersion(HAServiceProtocolPB.class), addr, conf);
}
@@ -76,7 +76,7 @@ public HAServiceProtocolClientSideTranslatorPB(
InetSocketAddress addr, Configuration conf,
SocketFactory socketFactory, int timeout) throws IOException {
RPC.setProtocolEngine(conf, HAServiceProtocolPB.class,
- ProtobufRpcEngine.class);
+ ProtobufRpcEngine2.class);
rpcProxy = RPC.getProxy(HAServiceProtocolPB.class,
RPC.getProtocolVersion(HAServiceProtocolPB.class), addr,
UserGroupInformation.getCurrentUser(), conf, socketFactory, timeout);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolClientSideTranslatorPB.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolClientSideTranslatorPB.java
index 7001d93995f0f..3777207c7e45c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolClientSideTranslatorPB.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolClientSideTranslatorPB.java
@@ -28,7 +28,7 @@
import org.apache.hadoop.ha.proto.ZKFCProtocolProtos.CedeActiveRequestProto;
import org.apache.hadoop.ha.proto.ZKFCProtocolProtos.GracefulFailoverRequestProto;
import org.apache.hadoop.ipc.ProtobufHelper;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.ProtobufRpcEngine2;
import org.apache.hadoop.ipc.ProtocolTranslator;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.security.AccessControlException;
@@ -48,7 +48,7 @@ public ZKFCProtocolClientSideTranslatorPB(
InetSocketAddress addr, Configuration conf,
SocketFactory socketFactory, int timeout) throws IOException {
RPC.setProtocolEngine(conf, ZKFCProtocolPB.class,
- ProtobufRpcEngine.class);
+ ProtobufRpcEngine2.class);
rpcProxy = RPC.getProxy(ZKFCProtocolPB.class,
RPC.getProtocolVersion(ZKFCProtocolPB.class), addr,
UserGroupInformation.getCurrentUser(), conf, socketFactory, timeout);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
index 3fd74f0e89a27..8b69d57e8120e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
@@ -1346,7 +1346,11 @@ private void bindForPortRange(ServerConnector listener, int startPort)
try {
bindListener(listener);
return;
- } catch (BindException ex) {
+ } catch (IOException ex) {
+ if (!(ex instanceof BindException)
+ && !(ex.getCause() instanceof BindException)) {
+ throw ex;
+ }
// Ignore exception. Move to next port.
ioException = ex;
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/lib/StaticUserWebFilter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/lib/StaticUserWebFilter.java
index fc64697bb8c75..915427f8e1845 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/lib/StaticUserWebFilter.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/lib/StaticUserWebFilter.java
@@ -19,7 +19,7 @@
import java.io.IOException;
import java.security.Principal;
-import java.util.HashMap;
+import java.util.Collections;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
@@ -121,14 +121,10 @@ public void init(FilterConfig conf) throws ServletException {
@Override
public void initFilter(FilterContainer container, Configuration conf) {
- HashMap options = new HashMap();
-
String username = getUsernameFromConf(conf);
- options.put(HADOOP_HTTP_STATIC_USER, username);
- container.addFilter("static_user_filter",
- StaticUserFilter.class.getName(),
- options);
+ container.addFilter("static_user_filter", StaticUserFilter.class.getName(),
+ Collections.singletonMap(HADOOP_HTTP_STATIC_USER, username));
}
/**
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SecureIOUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SecureIOUtils.java
index 9d3c3c1ceeaa7..f14d99227c7cc 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SecureIOUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SecureIOUtils.java
@@ -272,7 +272,7 @@ private static void checkStat(File f, String owner, String group,
UserGroupInformation.createRemoteUser(expectedOwner);
final String adminsGroupString = "Administrators";
success = owner.equals(adminsGroupString)
- && ugi.getGroups().contains(adminsGroupString);
+ && ugi.getGroupsSet().contains(adminsGroupString);
} else {
success = false;
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
index 64824a15cd89c..6db00d724aa35 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
@@ -35,6 +35,7 @@
import java.lang.reflect.Proxy;
import java.util.Arrays;
import java.util.Collections;
+import java.util.HashSet;
import java.util.Map;
/**
@@ -312,6 +313,8 @@ public String toString() {
private volatile boolean hasSuccessfulCall = false;
+ private HashSet<String> failedAtLeastOnce = new HashSet<>();
+
private final RetryPolicy defaultPolicy;
private final Map methodNameToPolicyMap;
@@ -390,12 +393,18 @@ private RetryInfo handleException(final Method method, final int callId,
private void log(final Method method, final boolean isFailover,
final int failovers, final long delay, final Exception ex) {
- // log info if this has made some successful calls or
- // this is not the first failover
- final boolean info = hasSuccessfulCall || failovers != 0
- || asyncCallHandler.hasSuccessfulCall();
- if (!info && !LOG.isDebugEnabled()) {
- return;
+ boolean info = true;
+ // If this is the first failover to this proxy, skip logging at INFO level
+ if (!failedAtLeastOnce.contains(proxyDescriptor.getProxyInfo().toString()))
+ {
+ failedAtLeastOnce.add(proxyDescriptor.getProxyInfo().toString());
+
+ // If successful calls were made to this proxy, log info even for first
+ // failover
+ info = hasSuccessfulCall || asyncCallHandler.hasSuccessfulCall();
+ if (!info && !LOG.isDebugEnabled()) {
+ return;
+ }
}
final StringBuilder b = new StringBuilder()
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 688eed647c209..6240f859cf786 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -649,6 +649,7 @@ private synchronized boolean updateAddress() throws IOException {
private synchronized void setupConnection(
UserGroupInformation ticket) throws IOException {
+ LOG.debug("Setup connection to " + server.toString());
short ioFailures = 0;
short timeoutFailures = 0;
while (true) {
@@ -711,8 +712,16 @@ private synchronized void setupConnection(
} catch (IOException ie) {
if (updateAddress()) {
timeoutFailures = ioFailures = 0;
+ try {
+ // HADOOP-17068: when server changed, ignore the exception.
+ handleConnectionFailure(ioFailures++, ie);
+ } catch (IOException ioe) {
+ LOG.warn("Exception when handle ConnectionFailure: "
+ + ioe.getMessage());
+ }
+ } else {
+ handleConnectionFailure(ioFailures++, ie);
}
- handleConnectionFailure(ioFailures++, ie);
}
}
}
@@ -1277,7 +1286,7 @@ private synchronized void close() {
cleanupCalls();
}
} else {
- // log the info
+ // Log the newest server information if the address was updated.
if (LOG.isDebugEnabled()) {
LOG.debug("closing ipc connection to " + server + ": " +
closeException.getMessage(),closeException);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java
index 3e952eb63c3ff..45cbd4e99dff8 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java
@@ -43,6 +43,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ipc.metrics.DecayRpcSchedulerDetailedMetrics;
+import org.apache.hadoop.ipc.metrics.RpcMetrics;
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.MetricsSource;
@@ -632,8 +633,8 @@ public void addResponseTime(String callName, Schedulable schedulable,
addCost(user, processingCost);
int priorityLevel = schedulable.getPriorityLevel();
- long queueTime = details.get(Timing.QUEUE, TimeUnit.MILLISECONDS);
- long processingTime = details.get(Timing.PROCESSING, TimeUnit.MILLISECONDS);
+ long queueTime = details.get(Timing.QUEUE, RpcMetrics.TIMEUNIT);
+ long processingTime = details.get(Timing.PROCESSING, RpcMetrics.TIMEUNIT);
this.decayRpcSchedulerDetailedMetrics.addQueueTime(
priorityLevel, queueTime);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufHelper.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufHelper.java
index bb86cfc35bf4e..1e110b9011313 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufHelper.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufHelper.java
@@ -53,6 +53,23 @@ public static IOException getRemoteException(ServiceException se) {
return e instanceof IOException ? (IOException) e : new IOException(se);
}
+ /**
+ * Kept for backward compatibility.
+ * Return the IOException thrown by the remote server wrapped in
+ * ServiceException as cause.
+ * @param se ServiceException that wraps IO exception thrown by the server
+ * @return Exception wrapped in ServiceException or
+ * a new IOException that wraps the unexpected ServiceException.
+ */
+ @Deprecated
+ public static IOException getRemoteException(
+ com.google.protobuf.ServiceException se) {
+ Throwable e = se.getCause();
+ if (e == null) {
+ return new IOException(se);
+ }
+ return e instanceof IOException ? (IOException) e : new IOException(se);
+ }
/**
* Map used to cache fixed strings to ByteStrings. Since there is no
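As a hedged sketch of how a client-side translator might keep using the deprecated com.google.protobuf overload added above (the protocol interface below is a placeholder, not a real Hadoop type):

    import java.io.IOException;
    import com.google.protobuf.ServiceException;
    import org.apache.hadoop.ipc.ProtobufHelper;

    final class TranslatorSketch {
      // Hypothetical stand-in for a protobuf 2.5-generated blocking interface.
      interface FooProtocolPB {
        void foo(Object controller, Object request) throws ServiceException;
      }

      static void callFoo(FooProtocolPB rpcProxy, Object request)
          throws IOException {
        try {
          rpcProxy.foo(null, request);
        } catch (ServiceException e) {
          // The deprecated overload unwraps the IOException (or RemoteException)
          // carried as the ServiceException's cause.
          throw ProtobufHelper.getRemoteException(e);
        }
      }
    }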
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
index 14b356f847acf..220ad1ded9fec 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
@@ -19,8 +19,11 @@
package org.apache.hadoop.ipc;
import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.thirdparty.protobuf.*;
-import org.apache.hadoop.thirdparty.protobuf.Descriptors.MethodDescriptor;
+import com.google.protobuf.BlockingService;
+import com.google.protobuf.Descriptors.MethodDescriptor;
+import com.google.protobuf.Message;
+import com.google.protobuf.ServiceException;
+import com.google.protobuf.TextFormat;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
@@ -29,6 +32,7 @@
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.ipc.Client.ConnectionId;
import org.apache.hadoop.ipc.RPC.RpcInvoker;
+import org.apache.hadoop.ipc.RPC.RpcKind;
import org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.SecretManager;
@@ -52,7 +56,10 @@
/**
* RPC Engine for for protobuf based RPCs.
+ * This engine uses Protobuf 2.5.0. Recommended to upgrade to Protobuf 3.x
+ * from hadoop-thirdparty and use ProtobufRpcEngine2.
*/
+@Deprecated
@InterfaceStability.Evolving
public class ProtobufRpcEngine implements RpcEngine {
public static final Logger LOG =
@@ -355,6 +362,7 @@ public static class Server extends RPC.Server {
new ThreadLocal<>();
static final ThreadLocal currentCallInfo = new ThreadLocal<>();
+ private static final RpcInvoker RPC_INVOKER = new ProtoBufRpcInvoker();
static class CallInfo {
private final RPC.Server server;
@@ -433,7 +441,15 @@ public Server(Class> protocolClass, Object protocolImpl,
registerProtocolAndImpl(RPC.RpcKind.RPC_PROTOCOL_BUFFER, protocolClass,
protocolImpl);
}
-
+
+ @Override
+ protected RpcInvoker getServerRpcInvoker(RpcKind rpcKind) {
+ if (rpcKind == RpcKind.RPC_PROTOCOL_BUFFER) {
+ return RPC_INVOKER;
+ }
+ return super.getServerRpcInvoker(rpcKind);
+ }
+
/**
* Protobuf invoker for {@link RpcInvoker}
*/
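A minimal sketch of the migration that the deprecation above points at: callers switch the engine registration to ProtobufRpcEngine2, exactly as the ZKFC/HA translators earlier in this patch do. The protocol interface here is hypothetical (a real *ProtocolPB interface carries @ProtocolInfo and a versionID).

    import java.net.InetSocketAddress;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ipc.ProtobufRpcEngine2;
    import org.apache.hadoop.ipc.RPC;

    final class EngineMigrationSketch {
      // Hypothetical protocol interface standing in for a *ProtocolPB type.
      interface MyProtocolPB {
      }

      static MyProtocolPB getProxy(Configuration conf, InetSocketAddress addr)
          throws Exception {
        // Previously: RPC.setProtocolEngine(conf, MyProtocolPB.class,
        //             ProtobufRpcEngine.class);
        RPC.setProtocolEngine(conf, MyProtocolPB.class, ProtobufRpcEngine2.class);
        return RPC.getProxy(MyProtocolPB.class,
            RPC.getProtocolVersion(MyProtocolPB.class), addr, conf);
      }
    }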
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine2.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine2.java
new file mode 100644
index 0000000000000..30315343962c8
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine2.java
@@ -0,0 +1,598 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ipc;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.thirdparty.protobuf.*;
+import org.apache.hadoop.thirdparty.protobuf.Descriptors.MethodDescriptor;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.ipc.Client.ConnectionId;
+import org.apache.hadoop.ipc.RPC.RpcInvoker;
+import org.apache.hadoop.ipc.protobuf.ProtobufRpcEngine2Protos.RequestHeaderProto;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.SecretManager;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.util.Time;
+import org.apache.hadoop.util.concurrent.AsyncGet;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.net.SocketFactory;
+import java.io.IOException;
+import java.lang.reflect.Method;
+import java.lang.reflect.Proxy;
+import java.net.InetSocketAddress;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/**
+ * RPC Engine for protobuf based RPCs.
+ */
+@InterfaceStability.Evolving
+public class ProtobufRpcEngine2 implements RpcEngine {
+ public static final Logger LOG =
+ LoggerFactory.getLogger(ProtobufRpcEngine2.class);
+ private static final ThreadLocal>
+ ASYNC_RETURN_MESSAGE = new ThreadLocal<>();
+
+ static { // Register the rpcRequest deserializer for ProtobufRpcEngine
+ org.apache.hadoop.ipc.Server.registerProtocolEngine(
+ RPC.RpcKind.RPC_PROTOCOL_BUFFER, RpcProtobufRequest.class,
+ new Server.ProtoBufRpcInvoker());
+ }
+
+ private static final ClientCache CLIENTS = new ClientCache();
+
+ @Unstable
+ public static AsyncGet getAsyncReturnMessage() {
+ return ASYNC_RETURN_MESSAGE.get();
+ }
+
+ public ProtocolProxy getProxy(Class protocol, long clientVersion,
+ InetSocketAddress addr, UserGroupInformation ticket, Configuration conf,
+ SocketFactory factory, int rpcTimeout) throws IOException {
+ return getProxy(protocol, clientVersion, addr, ticket, conf, factory,
+ rpcTimeout, null);
+ }
+
+ @Override
+ public ProtocolProxy getProxy(
+ Class protocol, long clientVersion,
+ InetSocketAddress addr, UserGroupInformation ticket, Configuration conf,
+ SocketFactory factory, int rpcTimeout, RetryPolicy connectionRetryPolicy)
+ throws IOException {
+ return getProxy(protocol, clientVersion, addr, ticket, conf, factory,
+ rpcTimeout, connectionRetryPolicy, null, null);
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ public ProtocolProxy getProxy(Class protocol, long clientVersion,
+ InetSocketAddress addr, UserGroupInformation ticket, Configuration conf,
+ SocketFactory factory, int rpcTimeout, RetryPolicy connectionRetryPolicy,
+ AtomicBoolean fallbackToSimpleAuth, AlignmentContext alignmentContext)
+ throws IOException {
+
+ final Invoker invoker = new Invoker(protocol, addr, ticket, conf, factory,
+ rpcTimeout, connectionRetryPolicy, fallbackToSimpleAuth,
+ alignmentContext);
+ return new ProtocolProxy(protocol, (T) Proxy.newProxyInstance(
+ protocol.getClassLoader(), new Class[]{protocol}, invoker), false);
+ }
+
+ @Override
+ public ProtocolProxy getProtocolMetaInfoProxy(
+ ConnectionId connId, Configuration conf, SocketFactory factory)
+ throws IOException {
+ Class protocol = ProtocolMetaInfoPB.class;
+ return new ProtocolProxy(protocol,
+ (ProtocolMetaInfoPB) Proxy.newProxyInstance(protocol.getClassLoader(),
+ new Class[]{protocol}, new Invoker(protocol, connId, conf,
+ factory)), false);
+ }
+
+ private static final class Invoker implements RpcInvocationHandler {
+ private final Map returnTypes =
+ new ConcurrentHashMap();
+ private boolean isClosed = false;
+ private final Client.ConnectionId remoteId;
+ private final Client client;
+ private final long clientProtocolVersion;
+ private final String protocolName;
+ private AtomicBoolean fallbackToSimpleAuth;
+ private AlignmentContext alignmentContext;
+
+ private Invoker(Class> protocol, InetSocketAddress addr,
+ UserGroupInformation ticket, Configuration conf, SocketFactory factory,
+ int rpcTimeout, RetryPolicy connectionRetryPolicy,
+ AtomicBoolean fallbackToSimpleAuth, AlignmentContext alignmentContext)
+ throws IOException {
+ this(protocol, Client.ConnectionId.getConnectionId(
+ addr, protocol, ticket, rpcTimeout, connectionRetryPolicy, conf),
+ conf, factory);
+ this.fallbackToSimpleAuth = fallbackToSimpleAuth;
+ this.alignmentContext = alignmentContext;
+ }
+
+ /**
+ * This constructor takes a connectionId, instead of creating a new one.
+ */
+ private Invoker(Class> protocol, Client.ConnectionId connId,
+ Configuration conf, SocketFactory factory) {
+ this.remoteId = connId;
+ this.client = CLIENTS.getClient(conf, factory, RpcWritable.Buffer.class);
+ this.protocolName = RPC.getProtocolName(protocol);
+ this.clientProtocolVersion = RPC
+ .getProtocolVersion(protocol);
+ }
+
+ private RequestHeaderProto constructRpcRequestHeader(Method method) {
+ RequestHeaderProto.Builder builder = RequestHeaderProto
+ .newBuilder();
+ builder.setMethodName(method.getName());
+
+
+ // For protobuf, {@code protocol} used when creating client side proxy is
+ // the interface extending BlockingInterface, which has the annotations
+ // such as ProtocolName etc.
+ //
+ // Using Method.getDeclaringClass(), as in WritableEngine to get at
+ // the protocol interface will return BlockingInterface, from where
+ // the annotation ProtocolName and Version cannot be
+ // obtained.
+ //
+ // Hence we simply use the protocol class used to create the proxy.
+ // For PB this may limit the use of mixins on client side.
+ builder.setDeclaringClassProtocolName(protocolName);
+ builder.setClientProtocolVersion(clientProtocolVersion);
+ return builder.build();
+ }
+
+ /**
+ * This is the client side invoker of the RPC method. It only throws
+ * ServiceException, since the invocation proxy expects only
+ * ServiceException to be thrown by the method in the case of a protobuf
+ * service.
+ *
+ * ServiceException has the following causes:
+ *
+ * Exceptions encountered on the client side in this method are
+ * set as cause in ServiceException as is.
+ * Exceptions from the server are wrapped in RemoteException and are
+ * set as cause in ServiceException
+ *
+ *
+ * Note that the client calling protobuf RPC methods must handle
+ * ServiceException by getting the cause from the ServiceException. If the
+ * cause is RemoteException, then unwrap it to get the exception thrown by
+ * the server.
+ */
+ @Override
+ public Message invoke(Object proxy, final Method method, Object[] args)
+ throws ServiceException {
+ long startTime = 0;
+ if (LOG.isDebugEnabled()) {
+ startTime = Time.now();
+ }
+
+ if (args.length != 2) { // RpcController + Message
+ throw new ServiceException(
+ "Too many or few parameters for request. Method: ["
+ + method.getName() + "]" + ", Expected: 2, Actual: "
+ + args.length);
+ }
+ if (args[1] == null) {
+ throw new ServiceException("null param while calling Method: ["
+ + method.getName() + "]");
+ }
+
+ // if Tracing is on then start a new span for this rpc.
+ // guard it in the if statement to make sure there isn't
+ // any extra string manipulation.
+ Tracer tracer = Tracer.curThreadTracer();
+ TraceScope traceScope = null;
+ if (tracer != null) {
+ traceScope = tracer.newScope(RpcClientUtil.methodToTraceString(method));
+ }
+
+ RequestHeaderProto rpcRequestHeader = constructRpcRequestHeader(method);
+
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(Thread.currentThread().getId() + ": Call -> " +
+ remoteId + ": " + method.getName() +
+ " {" + TextFormat.shortDebugString((Message) args[1]) + "}");
+ }
+
+
+ final Message theRequest = (Message) args[1];
+ final RpcWritable.Buffer val;
+ try {
+ val = (RpcWritable.Buffer) client.call(RPC.RpcKind.RPC_PROTOCOL_BUFFER,
+ new RpcProtobufRequest(rpcRequestHeader, theRequest), remoteId,
+ fallbackToSimpleAuth, alignmentContext);
+
+ } catch (Throwable e) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(Thread.currentThread().getId() + ": Exception <- " +
+ remoteId + ": " + method.getName() +
+ " {" + e + "}");
+ }
+ if (traceScope != null) {
+ traceScope.addTimelineAnnotation("Call got exception: " +
+ e.toString());
+ }
+ throw new ServiceException(e);
+ } finally {
+ if (traceScope != null) {
+ traceScope.close();
+ }
+ }
+
+ if (LOG.isDebugEnabled()) {
+ long callTime = Time.now() - startTime;
+ LOG.debug("Call: " + method.getName() + " took " + callTime + "ms");
+ }
+
+ if (Client.isAsynchronousMode()) {
+ final AsyncGet arr
+ = Client.getAsyncRpcResponse();
+ final AsyncGet asyncGet =
+ new AsyncGet() {
+ @Override
+ public Message get(long timeout, TimeUnit unit) throws Exception {
+ return getReturnMessage(method, arr.get(timeout, unit));
+ }
+
+ @Override
+ public boolean isDone() {
+ return arr.isDone();
+ }
+ };
+ ASYNC_RETURN_MESSAGE.set(asyncGet);
+ return null;
+ } else {
+ return getReturnMessage(method, val);
+ }
+ }
+
+ private Message getReturnMessage(final Method method,
+ final RpcWritable.Buffer buf) throws ServiceException {
+ Message prototype = null;
+ try {
+ prototype = getReturnProtoType(method);
+ } catch (Exception e) {
+ throw new ServiceException(e);
+ }
+ Message returnMessage;
+ try {
+ returnMessage = buf.getValue(prototype.getDefaultInstanceForType());
+
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(Thread.currentThread().getId() + ": Response <- " +
+ remoteId + ": " + method.getName() +
+ " {" + TextFormat.shortDebugString(returnMessage) + "}");
+ }
+
+ } catch (Throwable e) {
+ throw new ServiceException(e);
+ }
+ return returnMessage;
+ }
+
+ @Override
+ public void close() throws IOException {
+ if (!isClosed) {
+ isClosed = true;
+ CLIENTS.stopClient(client);
+ }
+ }
+
+ private Message getReturnProtoType(Method method) throws Exception {
+ if (returnTypes.containsKey(method.getName())) {
+ return returnTypes.get(method.getName());
+ }
+
+ Class> returnType = method.getReturnType();
+ Method newInstMethod = returnType.getMethod("getDefaultInstance");
+ newInstMethod.setAccessible(true);
+ Message prototype = (Message) newInstMethod.invoke(null, (Object[]) null);
+ returnTypes.put(method.getName(), prototype);
+ return prototype;
+ }
+
+ @Override //RpcInvocationHandler
+ public ConnectionId getConnectionId() {
+ return remoteId;
+ }
+ }
+
+ @VisibleForTesting
+ @InterfaceAudience.Private
+ @InterfaceStability.Unstable
+ static Client getClient(Configuration conf) {
+ return CLIENTS.getClient(conf, SocketFactory.getDefault(),
+ RpcWritable.Buffer.class);
+ }
+
+
+
+ @Override
+ public RPC.Server getServer(Class> protocol, Object protocolImpl,
+ String bindAddress, int port, int numHandlers, int numReaders,
+ int queueSizePerHandler, boolean verbose, Configuration conf,
+ SecretManager extends TokenIdentifier> secretManager,
+ String portRangeConfig, AlignmentContext alignmentContext)
+ throws IOException {
+ return new Server(protocol, protocolImpl, conf, bindAddress, port,
+ numHandlers, numReaders, queueSizePerHandler, verbose, secretManager,
+ portRangeConfig, alignmentContext);
+ }
+
+ public static class Server extends RPC.Server {
+
+ static final ThreadLocal CURRENT_CALLBACK =
+ new ThreadLocal<>();
+
+ static final ThreadLocal CURRENT_CALL_INFO = new ThreadLocal<>();
+
+ static class CallInfo {
+ private final RPC.Server server;
+ private final String methodName;
+
+ CallInfo(RPC.Server server, String methodName) {
+ this.server = server;
+ this.methodName = methodName;
+ }
+ }
+
+ static class ProtobufRpcEngineCallbackImpl
+ implements ProtobufRpcEngineCallback2 {
+
+ private final RPC.Server server;
+ private final Call call;
+ private final String methodName;
+ private final long setupTime;
+
+ ProtobufRpcEngineCallbackImpl() {
+ this.server = CURRENT_CALL_INFO.get().server;
+ this.call = Server.getCurCall().get();
+ this.methodName = CURRENT_CALL_INFO.get().methodName;
+ this.setupTime = Time.now();
+ }
+
+ @Override
+ public void setResponse(Message message) {
+ long processingTime = Time.now() - setupTime;
+ call.setDeferredResponse(RpcWritable.wrap(message));
+ server.updateDeferredMetrics(methodName, processingTime);
+ }
+
+ @Override
+ public void error(Throwable t) {
+ long processingTime = Time.now() - setupTime;
+ String detailedMetricsName = t.getClass().getSimpleName();
+ server.updateDeferredMetrics(detailedMetricsName, processingTime);
+ call.setDeferredError(t);
+ }
+ }
+
+ @InterfaceStability.Unstable
+ public static ProtobufRpcEngineCallback2 registerForDeferredResponse() {
+ ProtobufRpcEngineCallback2 callback = new ProtobufRpcEngineCallbackImpl();
+ CURRENT_CALLBACK.set(callback);
+ return callback;
+ }
+
+ /**
+ * Construct an RPC server.
+ *
+ * @param protocolClass the class of protocol
+ * @param protocolImpl the protocolImpl whose methods will be called
+ * @param conf the configuration to use
+ * @param bindAddress the address to bind on to listen for connection
+ * @param port the port to listen for connections on
+ * @param numHandlers the number of method handler threads to run
+ * @param verbose whether each call should be logged
+ * @param portRangeConfig A config parameter that can be used to restrict
+ * the range of ports used when port is 0 (an ephemeral port)
+ * @param alignmentContext provides server state info on client responses
+ */
+ public Server(Class<?> protocolClass, Object protocolImpl,
+ Configuration conf, String bindAddress, int port, int numHandlers,
+ int numReaders, int queueSizePerHandler, boolean verbose,
+ SecretManager<? extends TokenIdentifier> secretManager,
+ String portRangeConfig, AlignmentContext alignmentContext)
+ throws IOException {
+ super(bindAddress, port, null, numHandlers,
+ numReaders, queueSizePerHandler, conf,
+ serverNameFromClass(protocolImpl.getClass()), secretManager,
+ portRangeConfig);
+ setAlignmentContext(alignmentContext);
+ this.verbose = verbose;
+ registerProtocolAndImpl(RPC.RpcKind.RPC_PROTOCOL_BUFFER, protocolClass,
+ protocolImpl);
+ }
+
+ /**
+ * Protobuf invoker for {@link RpcInvoker}.
+ */
+ static class ProtoBufRpcInvoker implements RpcInvoker {
+ private static ProtoClassProtoImpl getProtocolImpl(RPC.Server server,
+ String protoName, long clientVersion) throws RpcServerException {
+ ProtoNameVer pv = new ProtoNameVer(protoName, clientVersion);
+ ProtoClassProtoImpl impl =
+ server.getProtocolImplMap(RPC.RpcKind.RPC_PROTOCOL_BUFFER).get(pv);
+ if (impl == null) { // no match for Protocol AND Version
+ VerProtocolImpl highest = server.getHighestSupportedProtocol(
+ RPC.RpcKind.RPC_PROTOCOL_BUFFER, protoName);
+ if (highest == null) {
+ throw new RpcNoSuchProtocolException(
+ "Unknown protocol: " + protoName);
+ }
+ // protocol supported but not the version that client wants
+ throw new RPC.VersionMismatch(protoName, clientVersion,
+ highest.version);
+ }
+ return impl;
+ }
+
+ @Override
+ /**
+ * This is a server side method, which is invoked over RPC. On success
+ * the return response has protobuf response payload. On failure, the
+ * exception name and the stack trace are returned in the response.
+ * See {@link HadoopRpcResponseProto}
+ *
+ * In this method there are three types of exceptions possible, and they are
+ * returned in the response as follows.
+ *
+ * 1. Exceptions encountered in this method itself are returned
+ * as {@link RpcServerException}.
+ * 2. Exceptions thrown by the service arrive wrapped in ServiceException;
+ * this method returns the underlying exception thrown by the
+ * service in the response.
+ * 3. Other exceptions thrown by the service are returned as
+ * is.
+ *
+ */
+ public Writable call(RPC.Server server, String connectionProtocolName,
+ Writable writableRequest, long receiveTime) throws Exception {
+ RpcProtobufRequest request = (RpcProtobufRequest) writableRequest;
+ RequestHeaderProto rpcRequest = request.getRequestHeader();
+ String methodName = rpcRequest.getMethodName();
+
+ /**
+ * RPCs for a particular interface (i.e. protocol) are done using an
+ * IPC connection that is set up using rpcProxy.
+ * The rpcProxy has a declared protocol name that is
+ * sent from client to server at connection time.
+ *
+ * Each RPC call also sends a protocol name
+ * (called declaringClassProtocolName). This name is usually the same
+ * as the connection protocol name, except in some cases.
+ * For example, meta protocols such as ProtocolInfoProto, which get info
+ * about the protocol, reuse the connection but need to indicate that
+ * the actual protocol is different (i.e. the protocol is
+ * ProtocolInfoProto) since they reuse the connection; in this case
+ * the declaringClassProtocolName field is set to ProtocolInfoProto.
+ */
+
+ String declaringClassProtoName =
+ rpcRequest.getDeclaringClassProtocolName();
+ long clientVersion = rpcRequest.getClientProtocolVersion();
+ if (server.verbose) {
+ LOG.info("Call: connectionProtocolName=" + connectionProtocolName +
+ ", method=" + methodName);
+ }
+
+ ProtoClassProtoImpl protocolImpl = getProtocolImpl(server,
+ declaringClassProtoName, clientVersion);
+ BlockingService service = (BlockingService) protocolImpl.protocolImpl;
+ MethodDescriptor methodDescriptor = service.getDescriptorForType()
+ .findMethodByName(methodName);
+ if (methodDescriptor == null) {
+ String msg = "Unknown method " + methodName + " called on "
+ + connectionProtocolName + " protocol.";
+ LOG.warn(msg);
+ throw new RpcNoSuchMethodException(msg);
+ }
+ Message prototype = service.getRequestPrototype(methodDescriptor);
+ Message param = request.getValue(prototype);
+
+ Message result;
+ Call currentCall = Server.getCurCall().get();
+ try {
+ server.rpcDetailedMetrics.init(protocolImpl.protocolClass);
+ CURRENT_CALL_INFO.set(new CallInfo(server, methodName));
+ currentCall.setDetailedMetricsName(methodName);
+ result = service.callBlockingMethod(methodDescriptor, null, param);
+ // Check if this needs to be a deferred response,
+ // by checking the ThreadLocal callback being set
+ if (CURRENT_CALLBACK.get() != null) {
+ currentCall.deferResponse();
+ CURRENT_CALLBACK.set(null);
+ return null;
+ }
+ } catch (ServiceException e) {
+ Exception exception = (Exception) e.getCause();
+ currentCall.setDetailedMetricsName(
+ exception.getClass().getSimpleName());
+ throw (Exception) e.getCause();
+ } catch (Exception e) {
+ currentCall.setDetailedMetricsName(e.getClass().getSimpleName());
+ throw e;
+ } finally {
+ CURRENT_CALL_INFO.set(null);
+ }
+ return RpcWritable.wrap(result);
+ }
+ }
+ }
+
+ // HTrace in the IPC layer creates the span name based on toString(),
+ // which uses the RPC header. In the normal case we want to defer decoding
+ // the RPC header until it is needed by the RPC engine.
+ static class RpcProtobufRequest extends RpcWritable.Buffer {
+ private volatile RequestHeaderProto requestHeader;
+ private Message payload;
+
+ RpcProtobufRequest() {
+ }
+
+ RpcProtobufRequest(RequestHeaderProto header, Message payload) {
+ this.requestHeader = header;
+ this.payload = payload;
+ }
+
+ RequestHeaderProto getRequestHeader() throws IOException {
+ if (getByteBuffer() != null && requestHeader == null) {
+ requestHeader = getValue(RequestHeaderProto.getDefaultInstance());
+ }
+ return requestHeader;
+ }
+
+ @Override
+ public void writeTo(ResponseBuffer out) throws IOException {
+ requestHeader.writeDelimitedTo(out);
+ if (payload != null) {
+ payload.writeDelimitedTo(out);
+ }
+ }
+
+ // this is used by htrace to name the span.
+ @Override
+ public String toString() {
+ try {
+ RequestHeaderProto header = getRequestHeader();
+ return header.getDeclaringClassProtocolName() + "." +
+ header.getMethodName();
+ } catch (IOException e) {
+ throw new IllegalArgumentException(e);
+ }
+ }
+ }
+}
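For readers following the client-side Invoker path above (getReturnMessage and the async future), a minimal, hedged sketch of how a caller selects this engine and obtains a proxy. It is not part of the patch; TestRpcServicePB stands in for any generated *PB protocol interface carrying a @ProtocolInfo annotation with a protocol version, and the host/port are placeholders.

// Hypothetical usage sketch, not part of this patch.
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.ProtobufRpcEngine2;
import org.apache.hadoop.ipc.RPC;

public class ProtobufRpcEngine2ClientSketch {
  public static TestRpcServicePB createProxy(String host, int port)
      throws IOException {
    Configuration conf = new Configuration();
    // Route calls for this protocol through ProtobufRpcEngine2.
    RPC.setProtocolEngine(conf, TestRpcServicePB.class, ProtobufRpcEngine2.class);
    // The engine's Invoker builds a RequestHeaderProto per call and decodes
    // the reply with getReturnMessage() shown above.
    return RPC.getProxy(TestRpcServicePB.class,
        RPC.getProtocolVersion(TestRpcServicePB.class),
        new InetSocketAddress(host, port), conf);
  }
}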
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngineCallback.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngineCallback.java
index 50b70ca4bec1a..f85adb17d3f8e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngineCallback.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngineCallback.java
@@ -18,12 +18,17 @@
package org.apache.hadoop.ipc;
-import org.apache.hadoop.thirdparty.protobuf.Message;
+import com.google.protobuf.Message;
+/**
+ * This engine uses Protobuf 2.5.0. It is recommended to upgrade to
+ * Protobuf 3.x from hadoop-thirdparty and use ProtobufRpcEngineCallback2
+ * instead.
+ */
+@Deprecated
public interface ProtobufRpcEngineCallback {
- public void setResponse(Message message);
+ void setResponse(Message message);
- public void error(Throwable t);
+ void error(Throwable t);
}
\ No newline at end of file
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngineCallback2.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngineCallback2.java
new file mode 100644
index 0000000000000..e8c09f56282e6
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngineCallback2.java
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ipc;
+
+import org.apache.hadoop.thirdparty.protobuf.Message;
+
+public interface ProtobufRpcEngineCallback2 {
+
+ public void setResponse(Message message);
+
+ public void error(Throwable t);
+
+}
\ No newline at end of file
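As a hedged illustration of how this callback pairs with ProtobufRpcEngine2.Server.registerForDeferredResponse() above: a blocking service implementation can register, hand the work to another thread, and send the response later. This is a sketch only; EchoRequestProto/EchoResponseProto and "executor" are hypothetical names, not part of the patch.

// Hypothetical service method sketch, not part of this patch.
public EchoResponseProto echo(RpcController controller, EchoRequestProto request)
    throws ServiceException {
  // Registering marks the current call as deferred; the engine's call() sees
  // CURRENT_CALLBACK set and skips sending an immediate response.
  final ProtobufRpcEngineCallback2 callback =
      ProtobufRpcEngine2.Server.registerForDeferredResponse();
  executor.submit(() -> {
    try {
      callback.setResponse(
          EchoResponseProto.newBuilder().setMessage(request.getMessage()).build());
    } catch (Throwable t) {
      callback.error(t);
    }
  });
  // The return value is ignored for deferred calls; the callback sends the response.
  return null;
}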
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
index 4f95863b03db6..e794cb913c232 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
@@ -1043,7 +1043,7 @@ protected Server(String bindAddress, int port,
private void initProtocolMetaInfo(Configuration conf) {
RPC.setProtocolEngine(conf, ProtocolMetaInfoPB.class,
- ProtobufRpcEngine.class);
+ ProtobufRpcEngine2.class);
ProtocolMetaInfoServerSideTranslatorPB xlator =
new ProtocolMetaInfoServerSideTranslatorPB(this);
BlockingService protocolInfoBlockingService = ProtocolInfoService
@@ -1067,7 +1067,7 @@ public Server addProtocol(RpcKind rpcKind, Class<?> protocolClass,
@Override
public Writable call(RPC.RpcKind rpcKind, String protocol,
Writable rpcRequest, long receiveTime) throws Exception {
- return getRpcInvoker(rpcKind).call(this, protocol, rpcRequest,
+ return getServerRpcInvoker(rpcKind).call(this, protocol, rpcRequest,
receiveTime);
}
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcClientUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcClientUtil.java
index 84ecba1d34e9c..0ce78e54a43a0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcClientUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcClientUtil.java
@@ -114,7 +114,7 @@ public static boolean isMethodSupported(Object rpcProxy, Class<?> protocol,
if (versionMap == null) {
Configuration conf = new Configuration();
RPC.setProtocolEngine(conf, ProtocolMetaInfoPB.class,
- ProtobufRpcEngine.class);
+ ProtobufRpcEngine2.class);
ProtocolMetaInfoPB protocolInfoProxy = getProtocolMetaInfoProxy(rpcProxy,
conf);
GetProtocolSignatureRequestProto.Builder builder =
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcScheduler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcScheduler.java
index 63812f47f2db0..5202c6b356177 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcScheduler.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcScheduler.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.ipc;
-import java.util.concurrent.TimeUnit;
+import org.apache.hadoop.ipc.metrics.RpcMetrics;
/**
* Implement this interface to be used for RPC scheduling and backoff.
@@ -62,12 +62,12 @@ default void addResponseTime(String callName, Schedulable schedulable,
// this interface, a default implementation is supplied which uses the old
// method. All new implementations MUST override this interface and should
// NOT use the other addResponseTime method.
- int queueTimeMs = (int)
- details.get(ProcessingDetails.Timing.QUEUE, TimeUnit.MILLISECONDS);
- int processingTimeMs = (int)
- details.get(ProcessingDetails.Timing.PROCESSING, TimeUnit.MILLISECONDS);
+ int queueTime = (int)
+ details.get(ProcessingDetails.Timing.QUEUE, RpcMetrics.TIMEUNIT);
+ int processingTime = (int)
+ details.get(ProcessingDetails.Timing.PROCESSING, RpcMetrics.TIMEUNIT);
addResponseTime(callName, schedulable.getPriorityLevel(),
- queueTimeMs, processingTimeMs);
+ queueTime, processingTime);
}
void stop();
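To show what the bridging comment above means for implementors, here is a hedged sketch of a scheduler that overrides the ProcessingDetails-based method and reads timings in RpcMetrics.TIMEUNIT. The class is hypothetical and assumes only the methods visible in this hunk plus the long-standing Schedulable-based ones; real schedulers (e.g. DecayRpcScheduler) are more involved.

// Hypothetical scheduler sketch, not part of the patch.
import org.apache.hadoop.ipc.ProcessingDetails;
import org.apache.hadoop.ipc.RpcScheduler;
import org.apache.hadoop.ipc.Schedulable;
import org.apache.hadoop.ipc.metrics.RpcMetrics;

public class LoggingRpcScheduler implements RpcScheduler {
  @Override
  public int getPriorityLevel(Schedulable obj) {
    return 0; // single priority level
  }

  @Override
  public boolean shouldBackOff(Schedulable obj) {
    return false; // never ask clients to back off
  }

  @Override
  public void addResponseTime(String callName, int priorityLevel,
      int queueTime, int processingTime) {
    // Legacy path; new implementations should not rely on it.
  }

  @Override
  public void addResponseTime(String callName, Schedulable schedulable,
      ProcessingDetails details) {
    // Timings are read in the unit the RPC metrics use, not hard-coded millis.
    long queueTime =
        details.get(ProcessingDetails.Timing.QUEUE, RpcMetrics.TIMEUNIT);
    long processingTime =
        details.get(ProcessingDetails.Timing.PROCESSING, RpcMetrics.TIMEUNIT);
    // record or aggregate queueTime/processingTime here
  }

  @Override
  public void stop() {
  }
}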
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcWritable.java
index 6604bd0cc1c68..f5f0d071f39ed 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcWritable.java
@@ -42,6 +42,8 @@ static RpcWritable wrap(Object o) {
return (RpcWritable)o;
} else if (o instanceof Message) {
return new ProtobufWrapper((Message)o);
+ } else if (o instanceof com.google.protobuf.Message) {
+ return new ProtobufWrapperLegacy((com.google.protobuf.Message) o);
} else if (o instanceof Writable) {
return new WritableWrapper((Writable)o);
}
@@ -132,6 +134,49 @@ <T> T readFrom(ByteBuffer bb) throws IOException {
}
}
+ // adapter for Protobufs.
+ static class ProtobufWrapperLegacy extends RpcWritable {
+ private com.google.protobuf.Message message;
+
+ ProtobufWrapperLegacy(com.google.protobuf.Message message) {
+ this.message = message;
+ }
+
+ com.google.protobuf.Message getMessage() {
+ return message;
+ }
+
+ @Override
+ void writeTo(ResponseBuffer out) throws IOException {
+ int length = message.getSerializedSize();
+ length += com.google.protobuf.CodedOutputStream.
+ computeUInt32SizeNoTag(length);
+ out.ensureCapacity(length);
+ message.writeDelimitedTo(out);
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ <T> T readFrom(ByteBuffer bb) throws IOException {
+ // using the parser with a byte[]-backed coded input stream is the
+ // most efficient way to deserialize a protobuf. it has a direct
+ // path to the PB ctor that doesn't create multi-layered streams
+ // that internally buffer.
+ com.google.protobuf.CodedInputStream cis =
+ com.google.protobuf.CodedInputStream.newInstance(
+ bb.array(), bb.position() + bb.arrayOffset(), bb.remaining());
+ try {
+ cis.pushLimit(cis.readRawVarint32());
+ message = message.getParserForType().parseFrom(cis);
+ cis.checkLastTagWas(0);
+ } finally {
+ // advance over the bytes read.
+ bb.position(bb.position() + cis.getTotalBytesRead());
+ }
+ return (T)message;
+ }
+ }
+
/**
* adapter to allow decoding of writables and protobufs from a byte buffer.
*/
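The comment in readFrom() above is the key design note: parsing from a byte[]-backed CodedInputStream avoids layered, internally buffering streams. A hedged, standalone sketch of that pattern for any com.google.protobuf message follows; the helper name is illustrative and it assumes a heap buffer (bb.hasArray()).

// Illustrative helper (not part of the patch) mirroring ProtobufWrapperLegacy.readFrom().
static <T extends com.google.protobuf.Message> T parseDelimited(
    java.nio.ByteBuffer bb, T prototype) throws java.io.IOException {
  // Wrap the buffer's backing array directly; no intermediate streams or copies.
  com.google.protobuf.CodedInputStream cis =
      com.google.protobuf.CodedInputStream.newInstance(
          bb.array(), bb.position() + bb.arrayOffset(), bb.remaining());
  int oldLimit = cis.pushLimit(cis.readRawVarint32()); // delimited: varint length prefix
  @SuppressWarnings("unchecked")
  T parsed = (T) prototype.getParserForType().parseFrom(cis);
  cis.checkLastTagWas(0);
  cis.popLimit(oldLimit);
  bb.position(bb.position() + cis.getTotalBytesRead()); // advance past consumed bytes
  return parsed;
}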
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index 4448164f4b137..907d55f9be347 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -304,7 +304,11 @@ public Class<? extends Writable> getRpcRequestWrapper(
RpcKindMapValue val = rpcKindMap.get(ProtoUtil.convert(rpcKind));
return (val == null) ? null : val.rpcRequestWrapperClass;
}
-
+
+ protected RpcInvoker getServerRpcInvoker(RPC.RpcKind rpcKind) {
+ return getRpcInvoker(rpcKind);
+ }
+
public static RpcInvoker getRpcInvoker(RPC.RpcKind rpcKind) {
RpcKindMapValue val = rpcKindMap.get(rpcKind);
return (val == null) ? null : val.rpcInvoker;
@@ -2688,15 +2692,15 @@ private void processRpcRequest(RpcRequestHeaderProto header,
call.setPriorityLevel(callQueue.getPriorityLevel(call));
call.markCallCoordinated(false);
if(alignmentContext != null && call.rpcRequest != null &&
- (call.rpcRequest instanceof ProtobufRpcEngine.RpcProtobufRequest)) {
+ (call.rpcRequest instanceof ProtobufRpcEngine2.RpcProtobufRequest)) {
// if call.rpcRequest is not RpcProtobufRequest, will skip the following
// step and treat the call as uncoordinated. As currently only certain
// ClientProtocol methods request made through RPC protobuf needs to be
// coordinated.
String methodName;
String protoName;
- ProtobufRpcEngine.RpcProtobufRequest req =
- (ProtobufRpcEngine.RpcProtobufRequest) call.rpcRequest;
+ ProtobufRpcEngine2.RpcProtobufRequest req =
+ (ProtobufRpcEngine2.RpcProtobufRequest) call.rpcRequest;
try {
methodName = req.getRequestHeader().getMethodName();
protoName = req.getRequestHeader().getDeclaringClassProtocolName();
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
index 624edc96b8ae7..cf4b4a9810c4f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
@@ -273,7 +273,11 @@ void registerSource(String name, String desc, MetricsSource source) {
T register(final String name, final String description, final T sink) {
LOG.debug(name +", "+ description);
if (allSinks.containsKey(name)) {
- LOG.warn("Sink "+ name +" already exists!");
+ if(sinks.get(name) == null) {
+ registerSink(name, description, sink);
+ } else {
+ LOG.warn("Sink "+ name +" already exists!");
+ }
return sink;
}
allSinks.put(name, sink);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MBeans.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MBeans.java
index 1b50498bbaf5a..4aef03a5e645f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MBeans.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MBeans.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.metrics2.util;
import java.lang.management.ManagementFactory;
-import java.util.HashMap;
+import java.util.Collections;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
@@ -70,8 +70,7 @@ private MBeans() {
*/
static public ObjectName register(String serviceName, String nameName,
Object theMbean) {
- return register(serviceName, nameName, new HashMap<String, String>(),
- theMbean);
+ return register(serviceName, nameName, Collections.emptyMap(), theMbean);
}
/**
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/CachedDNSToSwitchMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/CachedDNSToSwitchMapping.java
index af487ed5c61a5..803fcec8d6c77 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/CachedDNSToSwitchMapping.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/CachedDNSToSwitchMapping.java
@@ -130,8 +130,7 @@ public List<String> resolve(List<String> names) {
*/
@Override
public Map<String, String> getSwitchMap() {
- Map<String, String> switchMap = new HashMap<String, String>(cache);
- return switchMap;
+ return new HashMap<>(cache);
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
index 9f52fed9678b9..893012befcf44 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
@@ -196,10 +196,8 @@ public List<Node> getDatanodesInRack(String loc) {
loc = loc.substring(1);
}
InnerNode rack = (InnerNode) clusterMap.getLoc(loc);
- if (rack == null) {
- return null;
- }
- return new ArrayList<Node>(rack.getChildren());
+ return (rack == null) ? new ArrayList<>(0)
+ : new ArrayList<>(rack.getChildren());
} finally {
netlock.readLock().unlock();
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java
index cd3514c4bce16..2beda8401f8d1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java
@@ -25,6 +25,7 @@
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -130,7 +131,7 @@ public synchronized List<String> resolve(List<String> names) {
if (map == null) {
LOG.warn("Failed to read topology table. " +
NetworkTopology.DEFAULT_RACK + " will be used for all nodes.");
- map = new HashMap<String, String>();
+ map = Collections.emptyMap();
}
}
List<String> results = new ArrayList<String>(names.size());
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/CompositeGroupsMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/CompositeGroupsMapping.java
index b762df2acc022..6f799c1542095 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/CompositeGroupsMapping.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/CompositeGroupsMapping.java
@@ -19,6 +19,8 @@
import java.io.IOException;
import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
@@ -69,8 +71,8 @@ public class CompositeGroupsMapping
public synchronized List<String> getGroups(String user) throws IOException {
Set<String> groupSet = new TreeSet<String>();
- List<String> groups = null;
for (GroupMappingServiceProvider provider : providersList) {
+ List<String> groups = Collections.emptyList();
try {
groups = provider.getGroups(user);
} catch (Exception e) {
@@ -78,17 +80,15 @@ public synchronized List getGroups(String user) throws IOException {
user, provider.getClass().getSimpleName(), e.toString());
LOG.debug("Stacktrace: ", e);
}
- if (groups != null && ! groups.isEmpty()) {
+ if (!groups.isEmpty()) {
groupSet.addAll(groups);
if (!combined) break;
}
}
- List<String> results = new ArrayList<String>(groupSet.size());
- results.addAll(groupSet);
- return results;
+ return new ArrayList<>(groupSet);
}
-
+
/**
* Caches groups, no need to do that for this provider
*/
@@ -107,6 +107,29 @@ public void cacheGroupsAdd(List<String> groups) throws IOException {
// does nothing in this provider of user to groups mapping
}
+ @Override
+ public synchronized Set<String> getGroupsSet(String user) throws IOException {
+ Set<String> groupSet = new HashSet<String>();
+
+ Set<String> groups = null;
+ for (GroupMappingServiceProvider provider : providersList) {
+ try {
+ groups = provider.getGroupsSet(user);
+ } catch (Exception e) {
+ LOG.warn("Unable to get groups for user {} via {} because: {}",
+ user, provider.getClass().getSimpleName(), e.toString());
+ LOG.debug("Stacktrace: ", e);
+ }
+ if (groups != null && !groups.isEmpty()) {
+ groupSet.addAll(groups);
+ if (!combined) {
+ break;
+ }
+ }
+ }
+ return groupSet;
+ }
+
@Override
public synchronized Configuration getConf() {
return conf;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/GroupMappingServiceProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/GroupMappingServiceProvider.java
index 8b90f5bc7af9e..ff6c86d5febf3 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/GroupMappingServiceProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/GroupMappingServiceProvider.java
@@ -19,6 +19,7 @@
import java.io.IOException;
import java.util.List;
+import java.util.Set;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@@ -52,4 +53,13 @@ public interface GroupMappingServiceProvider {
* @throws IOException
*/
public void cacheGroupsAdd(List<String> groups) throws IOException;
+
+ /**
+ * Get all group memberships of a given user.
+ * Returns an empty set for a non-existent user.
+ * @param user User's name
+ * @return set of group memberships of the user
+ * @throws IOException
+ */
+ Set<String> getGroupsSet(String user) throws IOException;
}
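To make the new contract concrete, here is a hedged sketch of a trivial provider implementing both lookups. The class name and backing map are hypothetical; the real providers (JniBasedUnixGroupsMapping, LdapGroupsMapping, CompositeGroupsMapping, etc.) are updated elsewhere in this patch.

// Hypothetical provider sketch, not part of the patch.
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class StaticGroupsMapping implements GroupMappingServiceProvider {
  private final Map<String, Set<String>> mapping = new HashMap<>();

  @Override
  public List<String> getGroups(String user) throws IOException {
    // Legacy list view derived from the set-based lookup.
    return new ArrayList<>(getGroupsSet(user));
  }

  @Override
  public Set<String> getGroupsSet(String user) throws IOException {
    // Never null: unknown users get an empty set, matching the new contract.
    return mapping.getOrDefault(user, Collections.emptySet());
  }

  @Override
  public void cacheGroupsRefresh() throws IOException {
    // Nothing to refresh for a static mapping.
  }

  @Override
  public void cacheGroupsAdd(List<String> groups) throws IOException {
    // Caching is a no-op for a static mapping.
  }
}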
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
index b29278bd20751..961ec7d591924 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
@@ -26,7 +26,6 @@
import java.util.List;
import java.util.Map;
import java.util.Set;
-import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadFactory;
@@ -78,8 +77,8 @@ public class Groups {
private final GroupMappingServiceProvider impl;
- private final LoadingCache<String, List<String>> cache;
- private final AtomicReference<Map<String, List<String>>> staticMapRef =
+ private final LoadingCache<String, Set<String>> cache;
+ private final AtomicReference<Map<String, Set<String>>> staticMapRef =
new AtomicReference<>();
private final long cacheTimeout;
private final long negativeCacheTimeout;
@@ -168,8 +167,7 @@ private void parseStaticMapping(Configuration conf) {
CommonConfigurationKeys.HADOOP_USER_GROUP_STATIC_OVERRIDES_DEFAULT);
Collection<String> mappings = StringUtils.getStringCollection(
staticMapping, ";");
- Map<String, List<String>> staticUserToGroupsMap =
- new HashMap<String, List<String>>();
+ Map<String, Set<String>> staticUserToGroupsMap = new HashMap<>();
for (String users : mappings) {
Collection<String> userToGroups = StringUtils.getStringCollection(users,
"=");
@@ -181,10 +179,10 @@ private void parseStaticMapping(Configuration conf) {
String[] userToGroupsArray = userToGroups.toArray(new String[userToGroups
.size()]);
String user = userToGroupsArray[0];
- List<String> groups = Collections.emptyList();
+ Set<String> groups = Collections.emptySet();
if (userToGroupsArray.length == 2) {
- groups = (List<String>) StringUtils
- .getStringCollection(userToGroupsArray[1]);
+ groups = new LinkedHashSet<String>(StringUtils
+ .getStringCollection(userToGroupsArray[1]));
}
staticUserToGroupsMap.put(user, groups);
}
@@ -203,15 +201,47 @@ private IOException noGroupsForUser(String user) {
/**
* Get the group memberships of a given user.
* If the user's group is not cached, this method may block.
+ * Note this method can be expensive as it involves a Set->List conversion.
+ * For users with large group memberships (i.e., > 1000 groups), we recommend
+ * using getGroupsSet to avoid the conversion and to get fast membership
+ * lookup via contains().
* @param user User's name
- * @return the group memberships of the user
+ * @return the group memberships of the user as list
* @throws IOException if user does not exist
+ * @deprecated Use {@link #getGroupsSet(String user)} instead.
*/
+ @Deprecated
public List getGroups(final String user) throws IOException {
+ return Collections.unmodifiableList(new ArrayList<>(
+ getGroupInternal(user)));
+ }
+
+ /**
+ * Get the group memberships of a given user.
+ * If the user's group is not cached, this method may block.
+ * This provides better performance when the user has a large group
+ * membership, via
+ * 1) avoiding the set->list->set conversion for callers such as
+ * UGI/PermissionCheck, and
+ * 2) fast lookup using contains() on a Set instead of a List.
+ * @param user User's name
+ * @return the group memberships of the user as set
+ * @throws IOException if user does not exist
+ */
+ public Set getGroupsSet(final String user) throws IOException {
+ return Collections.unmodifiableSet(getGroupInternal(user));
+ }
+
+ /**
+ * Get the group memberships of a given user.
+ * If the user's group is not cached, this method may block.
+ * @param user User's name
+ * @return the group memberships of the user as Set
+ * @throws IOException if user does not exist
+ */
+ private Set<String> getGroupInternal(final String user) throws IOException {
// No need to lookup for groups of static users
- Map<String, List<String>> staticUserToGroupsMap = staticMapRef.get();
+ Map<String, Set<String>> staticUserToGroupsMap = staticMapRef.get();
if (staticUserToGroupsMap != null) {
- List<String> staticMapping = staticUserToGroupsMap.get(user);
+ Set<String> staticMapping = staticUserToGroupsMap.get(user);
if (staticMapping != null) {
return staticMapping;
}
@@ -267,7 +297,7 @@ public long read() {
/**
* Deals with loading data into the cache.
*/
- private class GroupCacheLoader extends CacheLoader<String, List<String>> {
+ private class GroupCacheLoader extends CacheLoader<String, Set<String>> {
private ListeningExecutorService executorService;
@@ -308,7 +338,7 @@ private class GroupCacheLoader extends CacheLoader<String, List<String>> {
* @throws IOException to prevent caching negative entries
*/
@Override
- public List<String> load(String user) throws Exception {
+ public Set<String> load(String user) throws Exception {
LOG.debug("GroupCacheLoader - load.");
TraceScope scope = null;
Tracer tracer = Tracer.curThreadTracer();
@@ -316,9 +346,9 @@ public List<String> load(String user) throws Exception {
scope = tracer.newScope("Groups#fetchGroupList");
scope.addKVAnnotation("user", user);
}
- List<String> groups = null;
+ Set<String> groups = null;
try {
- groups = fetchGroupList(user);
+ groups = fetchGroupSet(user);
} finally {
if (scope != null) {
scope.close();
@@ -334,9 +364,7 @@ public List<String> load(String user) throws Exception {
throw noGroupsForUser(user);
}
- // return immutable de-duped list
- return Collections.unmodifiableList(
- new ArrayList<>(new LinkedHashSet<>(groups)));
+ return groups;
}
/**
@@ -345,8 +373,8 @@ public List<String> load(String user) throws Exception {
* implementation, otherwise is arranges for the cache to be updated later
*/
@Override
- public ListenableFuture<List<String>> reload(final String key,
- List<String> oldValue)
+ public ListenableFuture<Set<String>> reload(final String key,
+ Set<String> oldValue)
throws Exception {
LOG.debug("GroupCacheLoader - reload (async).");
if (!reloadGroupsInBackground) {
@@ -354,19 +382,16 @@ public ListenableFuture<List<String>> reload(final String key,
}
backgroundRefreshQueued.incrementAndGet();
- ListenableFuture<List<String>> listenableFuture =
- executorService.submit(new Callable<List<String>>() {
- @Override
- public List<String> call() throws Exception {
- backgroundRefreshQueued.decrementAndGet();
- backgroundRefreshRunning.incrementAndGet();
- List<String> results = load(key);
- return results;
- }
+ ListenableFuture<Set<String>> listenableFuture =
+ executorService.submit(() -> {
+ backgroundRefreshQueued.decrementAndGet();
+ backgroundRefreshRunning.incrementAndGet();
+ Set<String> results = load(key);
+ return results;
});
- Futures.addCallback(listenableFuture, new FutureCallback<List<String>>() {
+ Futures.addCallback(listenableFuture, new FutureCallback<Set<String>>() {
@Override
- public void onSuccess(List<String> result) {
+ public void onSuccess(Set<String> result) {
backgroundRefreshSuccess.incrementAndGet();
backgroundRefreshRunning.decrementAndGet();
}
@@ -380,11 +405,12 @@ public void onFailure(Throwable t) {
}
/**
- * Queries impl for groups belonging to the user. This could involve I/O and take awhile.
+ * Queries impl for groups belonging to the user.
+ * This could involve I/O and take a while.
*/
- private List<String> fetchGroupList(String user) throws IOException {
+ private Set<String> fetchGroupSet(String user) throws IOException {
long startMs = timer.monotonicNow();
- List<String> groupList = impl.getGroups(user);
+ Set<String> groups = impl.getGroupsSet(user);
long endMs = timer.monotonicNow();
long deltaMs = endMs - startMs ;
UserGroupInformation.metrics.addGetGroups(deltaMs);
@@ -392,8 +418,7 @@ private List<String> fetchGroupList(String user) throws IOException {
LOG.warn("Potential performance problem: getGroups(user=" + user +") " +
"took " + deltaMs + " milliseconds.");
}
-
- return groupList;
+ return groups;
}
}
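A hedged usage sketch of the new API: callers that only test membership can use the Set directly and skip the copy that the deprecated getGroups() now performs. The user and group names are placeholders, and this fragment is illustrative only.

// Illustrative only; "alice" and "admin" are placeholder names, and IOException
// handling is left to the caller.
static boolean isAdmin(Configuration conf) throws IOException {
  Groups groups = Groups.getUserToGroupsMappingService(conf);
  // Set-based lookup: no Set->List copy, fast membership test via contains().
  Set<String> memberships = groups.getGroupsSet("alice");
  return memberships.contains("admin");
}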
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMapping.java
index a0f6142a3c5c7..6c24427f3e50e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMapping.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMapping.java
@@ -20,8 +20,11 @@
import java.io.IOException;
import java.util.Arrays;
+import java.util.LinkedHashSet;
import java.util.List;
+import java.util.Set;
+import org.apache.commons.collections.CollectionUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@@ -75,6 +78,18 @@ static private void logError(int groupId, String error) {
@Override
public List<String> getGroups(String user) throws IOException {
+ return Arrays.asList(getGroupsInternal(user));
+ }
+
+ @Override
+ public Set<String> getGroupsSet(String user) throws IOException {
+ String[] groups = getGroupsInternal(user);
+ Set<String> result = new LinkedHashSet<String>(groups.length);
+ CollectionUtils.addAll(result, groups);
+ return result;
+ }
+
+ private String[] getGroupsInternal(String user) throws IOException {
String[] groups = new String[0];
try {
groups = getGroupsForUser(user);
@@ -85,7 +100,7 @@ public List<String> getGroups(String user) throws IOException {
LOG.info("Error getting groups for " + user + ": " + e.getMessage());
}
}
- return Arrays.asList(groups);
+ return groups;
}
@Override
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMappingWithFallback.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMappingWithFallback.java
index f1644305d917e..cc47df1462678 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMappingWithFallback.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMappingWithFallback.java
@@ -20,6 +20,7 @@
import java.io.IOException;
import java.util.List;
+import java.util.Set;
import org.apache.hadoop.util.NativeCodeLoader;
import org.apache.hadoop.util.PerformanceAdvisory;
@@ -61,4 +62,9 @@ public void cacheGroupsAdd(List<String> groups) throws IOException {
impl.cacheGroupsAdd(groups);
}
+ @Override
+ public Set<String> getGroupsSet(String user) throws IOException {
+ return impl.getGroupsSet(user);
+ }
+
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.java
index 9ba55e436f3f8..65bd1c00333a9 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.java
@@ -20,6 +20,7 @@
import java.io.IOException;
import java.util.Arrays;
+import java.util.Collections;
import java.util.List;
import java.util.LinkedList;
@@ -125,6 +126,6 @@ protected synchronized List<String> getUsersForNetgroup(String netgroup) {
if (users != null && users.length != 0) {
return Arrays.asList(users);
}
- return new LinkedList<String>();
+ return Collections.emptyList();
}
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMappingWithFallback.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMappingWithFallback.java
index fcc47cb796f33..3d4bd588a5344 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMappingWithFallback.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMappingWithFallback.java
@@ -20,6 +20,7 @@
import java.io.IOException;
import java.util.List;
+import java.util.Set;
import org.apache.hadoop.util.NativeCodeLoader;
import org.slf4j.Logger;
@@ -60,4 +61,9 @@ public void cacheGroupsAdd(List<String> groups) throws IOException {
impl.cacheGroupsAdd(groups);
}
+ @Override
+ public Set<String> getGroupsSet(String user) throws IOException {
+ return impl.getGroupsSet(user);
+ }
+
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
index 8e71f69c858d1..3f656990517af 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
@@ -33,6 +33,7 @@
import java.util.Collections;
import java.util.Hashtable;
import java.util.Iterator;
+import java.util.LinkedHashSet;
import java.util.List;
import java.util.HashSet;
import java.util.Collection;
@@ -302,12 +303,12 @@ public class LdapGroupsMapping
}
private DirContext ctx;
- private Configuration conf;
+ private volatile Configuration conf;
- private Iterator<String> ldapUrls;
+ private volatile Iterator<String> ldapUrls;
private String currentLdapUrl;
- private boolean useSsl;
+ private volatile boolean useSsl;
private String keystore;
private String keystorePass;
private String truststore;
@@ -320,21 +321,21 @@ public class LdapGroupsMapping
private Iterator<BindUserInfo> bindUsers;
private BindUserInfo currentBindUser;
- private String userbaseDN;
+ private volatile String userbaseDN;
private String groupbaseDN;
private String groupSearchFilter;
- private String userSearchFilter;
- private String memberOfAttr;
+ private volatile String userSearchFilter;
+ private volatile String memberOfAttr;
private String groupMemberAttr;
- private String groupNameAttr;
- private int groupHierarchyLevels;
- private String posixUidAttr;
- private String posixGidAttr;
+ private volatile String groupNameAttr;
+ private volatile int groupHierarchyLevels;
+ private volatile String posixUidAttr;
+ private volatile String posixGidAttr;
private boolean isPosix;
- private boolean useOneQuery;
+ private volatile boolean useOneQuery;
private int numAttempts;
- private int numAttemptsBeforeFailover;
- private String ldapCtxFactoryClassName;
+ private volatile int numAttemptsBeforeFailover;
+ private volatile String ldapCtxFactoryClassName;
/**
* Returns list of groups for a user.
@@ -348,38 +349,7 @@ public class LdapGroupsMapping
*/
@Override
public synchronized List<String> getGroups(String user) {
- /*
- * Normal garbage collection takes care of removing Context instances when
- * they are no longer in use. Connections used by Context instances being
- * garbage collected will be closed automatically. So in case connection is
- * closed and gets CommunicationException, retry some times with new new
- * DirContext/connection.
- */
-
- // Tracks the number of attempts made using the same LDAP server
- int atemptsBeforeFailover = 1;
-
- for (int attempt = 1; attempt <= numAttempts; attempt++,
- atemptsBeforeFailover++) {
- try {
- return doGetGroups(user, groupHierarchyLevels);
- } catch (AuthenticationException e) {
- switchBindUser(e);
- } catch (NamingException e) {
- LOG.warn("Failed to get groups for user {} (attempt={}/{}) using {}. " +
- "Exception: ", user, attempt, numAttempts, currentLdapUrl, e);
- LOG.trace("TRACE", e);
-
- if (failover(atemptsBeforeFailover, numAttemptsBeforeFailover)) {
- atemptsBeforeFailover = 0;
- }
- }
-
- // Reset ctx so that new DirContext can be created with new connection
- this.ctx = null;
- }
-
- return Collections.emptyList();
+ return new ArrayList<>(getGroupsSet(user));
}
/**
@@ -458,10 +428,10 @@ private NamingEnumeration<SearchResult> lookupPosixGroup(SearchResult result,
* @return a list of strings representing group names of the user.
* @throws NamingException if unable to find group names
*/
- private List<String> lookupGroup(SearchResult result, DirContext c,
+ private Set<String> lookupGroup(SearchResult result, DirContext c,
int goUpHierarchy)
throws NamingException {
- List<String> groups = new ArrayList<>();
+ Set<String> groups = new LinkedHashSet<>();
Set<String> groupDNs = new HashSet<>();
NamingEnumeration groupResults;
@@ -484,11 +454,7 @@ private List<String> lookupGroup(SearchResult result, DirContext c,
getGroupNames(groupResult, groups, groupDNs, goUpHierarchy > 0);
}
if (goUpHierarchy > 0 && !isPosix) {
- // convert groups to a set to ensure uniqueness
- Set<String> groupset = new HashSet<>(groups);
- goUpGroupHierarchy(groupDNs, goUpHierarchy, groupset);
- // convert set back to list for compatibility
- groups = new ArrayList<>(groupset);
+ goUpGroupHierarchy(groupDNs, goUpHierarchy, groups);
}
}
return groups;
@@ -507,7 +473,7 @@ private List<String> lookupGroup(SearchResult result, DirContext c,
* return an empty string array.
* @throws NamingException if unable to get group names
*/
- List<String> doGetGroups(String user, int goUpHierarchy)
+ Set<String> doGetGroups(String user, int goUpHierarchy)
throws NamingException {
DirContext c = getDirContext();
@@ -518,11 +484,11 @@ List<String> doGetGroups(String user, int goUpHierarchy)
if (!results.hasMoreElements()) {
LOG.debug("doGetGroups({}) returned no groups because the " +
"user is not found.", user);
- return new ArrayList<>();
+ return Collections.emptySet();
}
SearchResult result = results.nextElement();
- List<String> groups = null;
+ Set<String> groups = Collections.emptySet();
if (useOneQuery) {
try {
/**
@@ -536,7 +502,7 @@ List<String> doGetGroups(String user, int goUpHierarchy)
memberOfAttr + "' attribute." +
"Returned user object: " + result.toString());
}
- groups = new ArrayList<>();
+ groups = new LinkedHashSet<>();
NamingEnumeration groupEnumeration = groupDNAttr.getAll();
while (groupEnumeration.hasMore()) {
String groupDN = groupEnumeration.next().toString();
@@ -548,7 +514,7 @@ List<String> doGetGroups(String user, int goUpHierarchy)
"the second LDAP query using the user's DN.", e);
}
}
- if (groups == null || groups.isEmpty() || goUpHierarchy > 0) {
+ if (groups.isEmpty() || goUpHierarchy > 0) {
groups = lookupGroup(result, c, goUpHierarchy);
}
LOG.debug("doGetGroups({}) returned {}", user, groups);
@@ -700,6 +666,42 @@ public void cacheGroupsAdd(List