diff --git a/BUILDING.txt b/BUILDING.txt index d54ce83183846..c96c851204e75 100644 --- a/BUILDING.txt +++ b/BUILDING.txt @@ -6,6 +6,7 @@ Requirements: * Unix System * JDK 1.8 * Maven 3.3 or later +* Boost 1.72 (if compiling native code) * Protocol Buffers 3.7.1 (if compiling native code) * CMake 3.1 or newer (if compiling native code) * Zlib devel (if compiling native code) @@ -72,6 +73,12 @@ Installing required packages for clean install of Ubuntu 14.04 LTS Desktop: && ./configure\ && make install \ && rm -rf /opt/protobuf-3.7-src +* Boost + $ curl -L https://sourceforge.net/projects/boost/files/boost/1.72.0/boost_1_72_0.tar.bz2/download > boost_1_72_0.tar.bz2 \ + && tar --bzip2 -xf boost_1_72_0.tar.bz2 \ + && cd boost_1_72_0 \ + && ./bootstrap.sh --prefix=/usr/ \ + && ./b2 --without-python install Optional packages: @@ -468,6 +475,7 @@ Requirements: * Windows System * JDK 1.8 * Maven 3.0 or later +* Boost 1.72 * Protocol Buffers 3.7.1 * CMake 3.1 or newer * Visual Studio 2010 Professional or Higher diff --git a/Jenkinsfile b/Jenkinsfile index 02b9a0eabdff3..0461c5727aff9 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -23,7 +23,7 @@ pipeline { options { buildDiscarder(logRotator(numToKeepStr: '5')) - timeout (time: 5, unit: 'HOURS') + timeout (time: 20, unit: 'HOURS') timestamps() checkoutToSubdirectory('src') } @@ -154,6 +154,11 @@ pipeline { # use emoji vote so it is easier to find the broken line YETUS_ARGS+=("--github-use-emoji-vote") + # test with Java 8 and 11 + YETUS_ARGS+=("--java-home=/usr/lib/jvm/java-8-openjdk-amd64") + YETUS_ARGS+=("--multijdkdirs=/usr/lib/jvm/java-11-openjdk-amd64") + YETUS_ARGS+=("--multijdktests=compile") + "${TESTPATCHBIN}" "${YETUS_ARGS[@]}" ''' } diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile index 5bd867f2f56c1..f72fa4659009a 100644 --- a/dev-support/docker/Dockerfile +++ b/dev-support/docker/Dockerfile @@ -33,16 +33,10 @@ RUN echo APT::Install-Suggests "0"\; >> /etc/apt/apt.conf.d/10disableextras ENV DEBIAN_FRONTEND noninteractive ENV DEBCONF_TERSE true -###### -# Install common dependencies from packages. Versions here are either -# sufficient or irrelevant. -# -# WARNING: DO NOT PUT JAVA APPS HERE! Otherwise they will install default -# Ubuntu Java. See Java section below! 
-###### # hadolint ignore=DL3008 RUN apt-get -q update \ && apt-get -q install -y --no-install-recommends \ + ant \ apt-utils \ bats \ build-essential \ @@ -51,11 +45,13 @@ RUN apt-get -q update \ cmake \ curl \ doxygen \ + findbugs \ fuse \ g++ \ gcc \ git \ gnupg-agent \ + libbcprov-java \ libbz2-dev \ libcurl4-openssl-dev \ libfuse-dev \ @@ -64,11 +60,13 @@ RUN apt-get -q update \ libsasl2-dev \ libsnappy-dev \ libssl-dev \ - libsnappy-dev \ libtool \ libzstd1-dev \ locales \ make \ + maven \ + openjdk-11-jdk \ + openjdk-8-jdk \ pinentry-curses \ pkg-config \ python \ @@ -86,15 +84,28 @@ RUN apt-get -q update \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* +###### +# Set env vars required to build Hadoop +###### +ENV MAVEN_HOME /usr +# JAVA_HOME must be set in Maven >= 3.5.0 (MNG-6003) +ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64 +ENV FINDBUGS_HOME /usr ####### -# OpenJDK 8 +# Install Boost 1.72 (1.65 ships with Bionic) ####### -# hadolint ignore=DL3008 -RUN apt-get -q update \ - && apt-get -q install -y --no-install-recommends openjdk-8-jdk libbcprov-java \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* +# hadolint ignore=DL3003 +RUN mkdir -p /opt/boost-library \ + && curl -L https://sourceforge.net/projects/boost/files/boost/1.72.0/boost_1_72_0.tar.bz2/download > boost_1_72_0.tar.bz2 \ + && mv boost_1_72_0.tar.bz2 /opt/boost-library \ + && cd /opt/boost-library \ + && tar --bzip2 -xf boost_1_72_0.tar.bz2 \ + && cd /opt/boost-library/boost_1_72_0 \ + && ./bootstrap.sh --prefix=/usr/ \ + && ./b2 --without-python install \ + && cd /root \ + && rm -rf /opt/boost-library ###### # Install Google Protobuf 3.7.1 (3.0.0 ships with Bionic) @@ -113,29 +124,6 @@ RUN mkdir -p /opt/protobuf-src \ ENV PROTOBUF_HOME /opt/protobuf ENV PATH "${PATH}:/opt/protobuf/bin" -###### -# Install Apache Maven 3.6.0 (3.6.0 ships with Bionic) -###### -# hadolint ignore=DL3008 -RUN apt-get -q update \ - && apt-get -q install -y --no-install-recommends maven \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* -ENV MAVEN_HOME /usr -# JAVA_HOME must be set in Maven >= 3.5.0 (MNG-6003) -ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64 - -###### -# Install findbugs 3.1.0 (3.1.0 ships with Bionic) -# Ant is needed for findbugs -###### -# hadolint ignore=DL3008 -RUN apt-get -q update \ - && apt-get -q install -y --no-install-recommends findbugs ant \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* -ENV FINDBUGS_HOME /usr - #### # Install pylint at fixed version (2.0.0 removed python2 support) # https://github.com/PyCQA/pylint/issues/2294 @@ -150,10 +138,10 @@ RUN pip2 install \ RUN pip2 install python-dateutil==2.7.3 ### -# Install node.js 8.17.0 for web UI framework (4.2.6 ships with Xenial) +# Install node.js 10.21.0 for web UI framework (4.2.6 ships with Xenial) ### -RUN curl -L -s -S https://deb.nodesource.com/setup_8.x | bash - \ - && apt-get install -y --no-install-recommends nodejs=8.17.0-1nodesource1 \ +RUN curl -L -s -S https://deb.nodesource.com/setup_10.x | bash - \ + && apt-get install -y --no-install-recommends nodejs=10.21.0-1nodesource1 \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* \ && npm install -g bower@1.8.8 diff --git a/dev-support/docker/Dockerfile_aarch64 b/dev-support/docker/Dockerfile_aarch64 index d0cfa5a2fa24f..5fd646fb9c08a 100644 --- a/dev-support/docker/Dockerfile_aarch64 +++ b/dev-support/docker/Dockerfile_aarch64 @@ -17,7 +17,7 @@ # Dockerfile for installing the necessary dependencies for building Hadoop. # See BUILDING.txt. 
-FROM ubuntu:xenial +FROM ubuntu:bionic WORKDIR /root @@ -35,24 +35,26 @@ ENV DEBCONF_TERSE true ###### # Install common dependencies from packages. Versions here are either # sufficient or irrelevant. -# -# WARNING: DO NOT PUT JAVA APPS HERE! Otherwise they will install default -# Ubuntu Java. See Java section below! ###### # hadolint ignore=DL3008 RUN apt-get -q update \ && apt-get -q install -y --no-install-recommends \ + ant \ apt-utils \ + bats \ build-essential \ bzip2 \ clang \ + cmake \ curl \ doxygen \ + findbugs \ fuse \ g++ \ gcc \ git \ gnupg-agent \ + libbcprov-java \ libbz2-dev \ libcurl4-openssl-dev \ libfuse-dev \ @@ -65,6 +67,9 @@ RUN apt-get -q update \ libzstd1-dev \ locales \ make \ + maven \ + openjdk-11-jdk \ + openjdk-8-jdk \ pinentry-curses \ pkg-config \ python \ @@ -74,47 +79,39 @@ RUN apt-get -q update \ python-setuptools \ python-wheel \ rsync \ + shellcheck \ software-properties-common \ - snappy \ sudo \ valgrind \ zlib1g-dev \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* +###### +# Set env vars required to build Hadoop +###### +ENV MAVEN_HOME /usr +# JAVA_HOME must be set in Maven >= 3.5.0 (MNG-6003) +ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-arm64 +ENV FINDBUGS_HOME /usr ####### -# OpenJDK 8 +# Install Boost 1.72 (1.65 ships with Bionic) ####### -# hadolint ignore=DL3008 -RUN apt-get -q update \ - && apt-get -q install -y --no-install-recommends openjdk-8-jdk libbcprov-java \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - - -###### -# Install cmake 3.1.0 (3.5.1 ships with Xenial) -# There is no cmake binary available for aarch64. Build from source. -###### # hadolint ignore=DL3003 -RUN mkdir -p /opt/cmake/src \ - && curl -L -s -S \ - https://cmake.org/files/v3.1/cmake-3.1.0-1-src.tar.bz2 \ - -o /opt/cmake/cmake-src.tar.bz2 \ - && tar xvjf /opt/cmake/cmake-src.tar.bz2 -C /opt/cmake/src \ - && cd /opt/cmake/src \ - && tar xvjf cmake-3.1.0.tar.bz2 \ - && cd cmake-3.1.0 && patch -p0 -i ../cmake-3.1.0-1.patch && mkdir .build && cd .build \ - && ../bootstrap --parallel=2 \ - && make -j2 && ./bin/cpack \ - && tar xzf cmake-3.1.0-Linux-aarch64.tar.gz --strip-components 1 -C /opt/cmake \ - && cd /opt/cmake && rm -rf /opt/cmake/src -ENV CMAKE_HOME /opt/cmake -ENV PATH "${PATH}:/opt/cmake/bin" +RUN mkdir -p /opt/boost-library \ + && curl -L https://sourceforge.net/projects/boost/files/boost/1.72.0/boost_1_72_0.tar.bz2/download > boost_1_72_0.tar.bz2 \ + && mv boost_1_72_0.tar.bz2 /opt/boost-library \ + && cd /opt/boost-library \ + && tar --bzip2 -xf boost_1_72_0.tar.bz2 \ + && cd /opt/boost-library/boost_1_72_0 \ + && ./bootstrap.sh --prefix=/usr/ \ + && ./b2 --without-python install \ + && cd /root \ + && rm -rf /opt/boost-library ###### -# Install Google Protobuf 3.7.1 (2.6.0 ships with Xenial) +# Install Google Protobuf 3.7.1 (3.0.0 ships with Bionic) ###### # hadolint ignore=DL3003 RUN mkdir -p /opt/protobuf-src \ @@ -130,46 +127,6 @@ RUN mkdir -p /opt/protobuf-src \ ENV PROTOBUF_HOME /opt/protobuf ENV PATH "${PATH}:/opt/protobuf/bin" -###### -# Install Apache Maven 3.3.9 (3.3.9 ships with Xenial) -###### -# hadolint ignore=DL3008 -RUN apt-get -q update \ - && apt-get -q install -y --no-install-recommends maven \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* -ENV MAVEN_HOME /usr - -###### -# Install findbugs 3.0.1 (3.0.1 ships with Xenial) -# Ant is needed for findbugs -###### -# hadolint ignore=DL3008 -RUN apt-get -q update \ - && apt-get -q install -y --no-install-recommends findbugs ant \ - && apt-get clean \ - && rm -rf 
/var/lib/apt/lists/* -ENV FINDBUGS_HOME /usr - -#### -# Install shellcheck (0.4.6, the latest as of 2017-09-26) -#### -# hadolint ignore=DL3008 -RUN add-apt-repository -y ppa:hvr/ghc \ - && apt-get -q update \ - && apt-get -q install -y --no-install-recommends shellcheck \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - -#### -# Install bats (0.4.0, the latest as of 2017-09-26, ships with Xenial) -#### -# hadolint ignore=DL3008 -RUN apt-get -q update \ - && apt-get -q install -y --no-install-recommends bats \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - #### # Install pylint at fixed version (2.0.0 removed python2 support) # https://github.com/PyCQA/pylint/issues/2294 @@ -184,10 +141,10 @@ RUN pip2 install \ RUN pip2 install python-dateutil==2.7.3 ### -# Install node.js 8.17.0 for web UI framework (4.2.6 ships with Xenial) +# Install node.js 10.21.0 for web UI framework (4.2.6 ships with Xenial) ### -RUN curl -L -s -S https://deb.nodesource.com/setup_8.x | bash - \ - && apt-get install -y --no-install-recommends nodejs=8.17.0-1nodesource1 \ +RUN curl -L -s -S https://deb.nodesource.com/setup_10.x | bash - \ + && apt-get install -y --no-install-recommends nodejs=10.21.0-1nodesource1 \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* \ && npm install -g bower@1.8.8 diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-tools.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-tools.xml index 054d8c0ace2bd..db744f511dadb 100644 --- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-tools.xml +++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-tools.xml @@ -47,6 +47,14 @@ /libexec/shellprofile.d 0755 + + ../hadoop-federation-balance/src/main/shellprofile.d + + * + + /libexec/shellprofile.d + 0755 + ../hadoop-extras/src/main/shellprofile.d @@ -111,6 +119,13 @@ *-sources.jar + + ../hadoop-federation-balance/target + /share/hadoop/${hadoop.component}/sources + + *-sources.jar + + ../hadoop-extras/target /share/hadoop/${hadoop.component}/sources diff --git a/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml b/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml index 8f3d3f13824ef..96e28496d7e98 100644 --- a/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml +++ b/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml @@ -119,7 +119,12 @@ - + + + + + diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml b/hadoop-client-modules/hadoop-client-minicluster/pom.xml index b447eedf1349f..f66528dc7f23c 100644 --- a/hadoop-client-modules/hadoop-client-minicluster/pom.xml +++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml @@ -811,15 +811,25 @@ */** - + org.eclipse.jetty:jetty-client */** + + org.eclipse.jetty:jetty-xml + + */** + + + + org.eclipse.jetty:jetty-http + + */** + + diff --git a/hadoop-client-modules/hadoop-client-runtime/pom.xml b/hadoop-client-modules/hadoop-client-runtime/pom.xml index fe95ed8688548..9a1efff6b1455 100644 --- a/hadoop-client-modules/hadoop-client-runtime/pom.xml +++ b/hadoop-client-modules/hadoop-client-runtime/pom.xml @@ -158,12 +158,8 @@ com.google.code.findbugs:jsr305 io.dropwizard.metrics:metrics-core - org.eclipse.jetty.websocket:* org.eclipse.jetty:jetty-servlet org.eclipse.jetty:jetty-security - org.eclipse.jetty:jetty-client - org.eclipse.jetty:jetty-http - org.eclipse.jetty:jetty-xml org.ow2.asm:* org.bouncycastle:* @@ -213,6 +209,13 @@ about.html + + + org.eclipse.jetty.websocket:* + + about.html + + org.apache.kerby:kerb-util diff --git 
a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml index cf5c3874d1063..23e39d055ffc5 100644 --- a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml +++ b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml @@ -283,6 +283,10 @@ + + + + diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index dd058812fc774..9bb70ac76a06a 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -395,7 +395,12 @@ src-compile-protoc - false + + false + + ProtobufRpcEngine.proto + + src-test-compile-protoc @@ -411,6 +416,9 @@ replace-generated-sources false + + **/ProtobufRpcEngineProtos.java + @@ -423,6 +431,14 @@ replace-sources false + + + **/ProtobufHelper.java + **/RpcWritable.java + **/ProtobufRpcEngineCallback.java + **/ProtobufRpcEngine.java + **/ProtobufRpcEngineProtos.java + @@ -1015,7 +1031,79 @@ - + + + aarch64 + + false + + aarch64 + + + + + + org.codehaus.mojo + build-helper-maven-plugin + + + add-source-legacy-protobuf + generate-sources + + add-source + + + + ${basedir}/src/main/arm-java + + + + + + + + + + + x86_64 + + false + + !aarch64 + + + + + + org.xolstice.maven.plugins + protobuf-maven-plugin + + + src-compile-protoc-legacy + generate-sources + + compile + + + false + + + com.google.protobuf:protoc:${protobuf.version}:exe:${os.detected.classifier} + + false + ${basedir}/src/main/proto + ${project.build.directory}/generated-sources/java + false + + ProtobufRpcEngine.proto + + + + + + + + diff --git a/hadoop-common-project/hadoop-common/src/main/arm-java/org/apache/hadoop/ipc/protobuf/ProtobufRpcEngineProtos.java b/hadoop-common-project/hadoop-common/src/main/arm-java/org/apache/hadoop/ipc/protobuf/ProtobufRpcEngineProtos.java new file mode 100644 index 0000000000000..28e28bf633784 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/arm-java/org/apache/hadoop/ipc/protobuf/ProtobufRpcEngineProtos.java @@ -0,0 +1,1163 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// This is class is added to source because for arm protoc 2.5.0 executable +// is not available to generate the same code. +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: ProtobufRpcEngine.proto +package org.apache.hadoop.ipc.protobuf; + +public final class ProtobufRpcEngineProtos { + private ProtobufRpcEngineProtos() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + } + public interface RequestHeaderProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string methodName = 1; + /** + * required string methodName = 1; + * + *
+     ** Name of the RPC method
+     * 
+ */ + boolean hasMethodName(); + /** + * required string methodName = 1; + * + *
+     ** Name of the RPC method
+     * 
+ */ + java.lang.String getMethodName(); + /** + * required string methodName = 1; + * + *
+     ** Name of the RPC method
+     * 
+ */ + com.google.protobuf.ByteString + getMethodNameBytes(); + + // required string declaringClassProtocolName = 2; + /** + * required string declaringClassProtocolName = 2; + * + *
+     **
+     * RPCs for a particular interface (i.e. protocol) are done using an
+     * IPC connection that is set up using rpcProxy.
+     * The rpcProxy has a declared protocol name that is
+     * sent from client to server at connection time.
+     *
+     * Each RPC call also sends a protocol name
+     * (called declaringClassProtocolName). This name is usually the same
+     * as the connection protocol name, except in some cases.
+     * For example, metaProtocols such as ProtocolInfoProto, which get metainfo
+     * about the protocol, reuse the connection but need to indicate that
+     * the actual protocol is different (i.e. the protocol is
+     * ProtocolInfoProto) since they reuse the connection; in this case
+     * the declaringClassProtocolName field is set to ProtocolInfoProto.
+     * 
+ */ + boolean hasDeclaringClassProtocolName(); + /** + * required string declaringClassProtocolName = 2; + * + *
+     **
+     * RPCs for a particular interface (i.e. protocol) are done using an
+     * IPC connection that is set up using rpcProxy.
+     * The rpcProxy has a declared protocol name that is
+     * sent from client to server at connection time.
+     *
+     * Each RPC call also sends a protocol name
+     * (called declaringClassProtocolName). This name is usually the same
+     * as the connection protocol name, except in some cases.
+     * For example, metaProtocols such as ProtocolInfoProto, which get metainfo
+     * about the protocol, reuse the connection but need to indicate that
+     * the actual protocol is different (i.e. the protocol is
+     * ProtocolInfoProto) since they reuse the connection; in this case
+     * the declaringClassProtocolName field is set to ProtocolInfoProto.
+     * 
+ */ + java.lang.String getDeclaringClassProtocolName(); + /** + * required string declaringClassProtocolName = 2; + * + *
+     **
+     * RPCs for a particular interface (i.e. protocol) are done using an
+     * IPC connection that is set up using rpcProxy.
+     * The rpcProxy has a declared protocol name that is
+     * sent from client to server at connection time.
+     *
+     * Each RPC call also sends a protocol name
+     * (called declaringClassProtocolName). This name is usually the same
+     * as the connection protocol name, except in some cases.
+     * For example, metaProtocols such as ProtocolInfoProto, which get metainfo
+     * about the protocol, reuse the connection but need to indicate that
+     * the actual protocol is different (i.e. the protocol is
+     * ProtocolInfoProto) since they reuse the connection; in this case
+     * the declaringClassProtocolName field is set to ProtocolInfoProto.
+     * 
+ */ + com.google.protobuf.ByteString + getDeclaringClassProtocolNameBytes(); + + // required uint64 clientProtocolVersion = 3; + /** + * required uint64 clientProtocolVersion = 3; + * + *
+     ** protocol version of class declaring the called method
+     * 
+ */ + boolean hasClientProtocolVersion(); + /** + * required uint64 clientProtocolVersion = 3; + * + *
+     ** protocol version of class declaring the called method
+     * 
+ */ + long getClientProtocolVersion(); + } + /** + * Protobuf type {@code hadoop.common.RequestHeaderProto} + * + *
+   **
+   * This message is the header for the Protobuf Rpc Engine
+   * when sending an RPC request from the RPC client to the RPC server.
+   * The actual request (serialized as protobuf) follows this header.
+   *
+   * No special header is needed for the RPC response for the Protobuf Rpc Engine.
+   * The normal RPC response header (see RpcHeader.proto) is sufficient.
+   * 
+ */ + public static final class RequestHeaderProto extends + com.google.protobuf.GeneratedMessage + implements RequestHeaderProtoOrBuilder { + // Use RequestHeaderProto.newBuilder() to construct. + private RequestHeaderProto(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private RequestHeaderProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final RequestHeaderProto defaultInstance; + public static RequestHeaderProto getDefaultInstance() { + return defaultInstance; + } + + public RequestHeaderProto getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private RequestHeaderProto( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + methodName_ = input.readBytes(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + declaringClassProtocolName_ = input.readBytes(); + break; + } + case 24: { + bitField0_ |= 0x00000004; + clientProtocolVersion_ = input.readUInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.internal_static_hadoop_common_RequestHeaderProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.internal_static_hadoop_common_RequestHeaderProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto.class, org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public RequestHeaderProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new RequestHeaderProto(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string methodName = 1; + public static final int METHODNAME_FIELD_NUMBER = 1; + private java.lang.Object methodName_; + /** + * required string methodName = 1; + * + *
+     ** Name of the RPC method
+     * 
+ */ + public boolean hasMethodName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string methodName = 1; + * + *
+     ** Name of the RPC method
+     * 
+ */ + public java.lang.String getMethodName() { + java.lang.Object ref = methodName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + methodName_ = s; + } + return s; + } + } + /** + * required string methodName = 1; + * + *
+     ** Name of the RPC method
+     * 
+ */ + public com.google.protobuf.ByteString + getMethodNameBytes() { + java.lang.Object ref = methodName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + methodName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required string declaringClassProtocolName = 2; + public static final int DECLARINGCLASSPROTOCOLNAME_FIELD_NUMBER = 2; + private java.lang.Object declaringClassProtocolName_; + /** + * required string declaringClassProtocolName = 2; + * + *
+     **
+     * RPCs for a particular interface (i.e. protocol) are done using an
+     * IPC connection that is set up using rpcProxy.
+     * The rpcProxy has a declared protocol name that is
+     * sent from client to server at connection time.
+     *
+     * Each RPC call also sends a protocol name
+     * (called declaringClassProtocolName). This name is usually the same
+     * as the connection protocol name, except in some cases.
+     * For example, metaProtocols such as ProtocolInfoProto, which get metainfo
+     * about the protocol, reuse the connection but need to indicate that
+     * the actual protocol is different (i.e. the protocol is
+     * ProtocolInfoProto) since they reuse the connection; in this case
+     * the declaringClassProtocolName field is set to ProtocolInfoProto.
+     * 
+ */ + public boolean hasDeclaringClassProtocolName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string declaringClassProtocolName = 2; + * + *
+     **
+     * RPCs for a particular interface (i.e. protocol) are done using an
+     * IPC connection that is set up using rpcProxy.
+     * The rpcProxy has a declared protocol name that is
+     * sent from client to server at connection time.
+     *
+     * Each RPC call also sends a protocol name
+     * (called declaringClassProtocolName). This name is usually the same
+     * as the connection protocol name, except in some cases.
+     * For example, metaProtocols such as ProtocolInfoProto, which get metainfo
+     * about the protocol, reuse the connection but need to indicate that
+     * the actual protocol is different (i.e. the protocol is
+     * ProtocolInfoProto) since they reuse the connection; in this case
+     * the declaringClassProtocolName field is set to ProtocolInfoProto.
+     * 
+ */ + public java.lang.String getDeclaringClassProtocolName() { + java.lang.Object ref = declaringClassProtocolName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + declaringClassProtocolName_ = s; + } + return s; + } + } + /** + * required string declaringClassProtocolName = 2; + * + *
+     **
+     * RPCs for a particular interface (i.e. protocol) are done using an
+     * IPC connection that is set up using rpcProxy.
+     * The rpcProxy has a declared protocol name that is
+     * sent from client to server at connection time.
+     *
+     * Each RPC call also sends a protocol name
+     * (called declaringClassProtocolName). This name is usually the same
+     * as the connection protocol name, except in some cases.
+     * For example, metaProtocols such as ProtocolInfoProto, which get metainfo
+     * about the protocol, reuse the connection but need to indicate that
+     * the actual protocol is different (i.e. the protocol is
+     * ProtocolInfoProto) since they reuse the connection; in this case
+     * the declaringClassProtocolName field is set to ProtocolInfoProto.
+     * 
+ */ + public com.google.protobuf.ByteString + getDeclaringClassProtocolNameBytes() { + java.lang.Object ref = declaringClassProtocolName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + declaringClassProtocolName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required uint64 clientProtocolVersion = 3; + public static final int CLIENTPROTOCOLVERSION_FIELD_NUMBER = 3; + private long clientProtocolVersion_; + /** + * required uint64 clientProtocolVersion = 3; + * + *
+     ** protocol version of class declaring the called method
+     * 
+ */ + public boolean hasClientProtocolVersion() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required uint64 clientProtocolVersion = 3; + * + *
+     ** protocol version of class declaring the called method
+     * 
+ */ + public long getClientProtocolVersion() { + return clientProtocolVersion_; + } + + private void initFields() { + methodName_ = ""; + declaringClassProtocolName_ = ""; + clientProtocolVersion_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasMethodName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasDeclaringClassProtocolName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasClientProtocolVersion()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getMethodNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getDeclaringClassProtocolNameBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeUInt64(3, clientProtocolVersion_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getMethodNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getDeclaringClassProtocolNameBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(3, clientProtocolVersion_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto)) { + return super.equals(obj); + } + org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto other = (org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto) obj; + + boolean result = true; + result = result && (hasMethodName() == other.hasMethodName()); + if (hasMethodName()) { + result = result && getMethodName() + .equals(other.getMethodName()); + } + result = result && (hasDeclaringClassProtocolName() == other.hasDeclaringClassProtocolName()); + if (hasDeclaringClassProtocolName()) { + result = result && getDeclaringClassProtocolName() + .equals(other.getDeclaringClassProtocolName()); + } + result = result && (hasClientProtocolVersion() == other.hasClientProtocolVersion()); + if (hasClientProtocolVersion()) { + result = result && (getClientProtocolVersion() + == other.getClientProtocolVersion()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasMethodName()) { + hash = (37 * hash) + 
METHODNAME_FIELD_NUMBER; + hash = (53 * hash) + getMethodName().hashCode(); + } + if (hasDeclaringClassProtocolName()) { + hash = (37 * hash) + DECLARINGCLASSPROTOCOLNAME_FIELD_NUMBER; + hash = (53 * hash) + getDeclaringClassProtocolName().hashCode(); + } + if (hasClientProtocolVersion()) { + hash = (37 * hash) + CLIENTPROTOCOLVERSION_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getClientProtocolVersion()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + 
com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hadoop.common.RequestHeaderProto} + * + *
+     **
+     * This message is the header for the Protobuf Rpc Engine
+     * when sending an RPC request from the RPC client to the RPC server.
+     * The actual request (serialized as protobuf) follows this header.
+     *
+     * No special header is needed for the RPC response for the Protobuf Rpc Engine.
+     * The normal RPC response header (see RpcHeader.proto) is sufficient.
+     * 
+ */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.internal_static_hadoop_common_RequestHeaderProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.internal_static_hadoop_common_RequestHeaderProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto.class, org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto.Builder.class); + } + + // Construct using org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + methodName_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + declaringClassProtocolName_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + clientProtocolVersion_ = 0L; + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.internal_static_hadoop_common_RequestHeaderProto_descriptor; + } + + public org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto getDefaultInstanceForType() { + return org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto.getDefaultInstance(); + } + + public org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto build() { + org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto buildPartial() { + org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto result = new org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.methodName_ = methodName_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.declaringClassProtocolName_ = declaringClassProtocolName_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.clientProtocolVersion_ = clientProtocolVersion_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto) { + return 
mergeFrom((org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto other) { + if (other == org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto.getDefaultInstance()) return this; + if (other.hasMethodName()) { + bitField0_ |= 0x00000001; + methodName_ = other.methodName_; + onChanged(); + } + if (other.hasDeclaringClassProtocolName()) { + bitField0_ |= 0x00000002; + declaringClassProtocolName_ = other.declaringClassProtocolName_; + onChanged(); + } + if (other.hasClientProtocolVersion()) { + setClientProtocolVersion(other.getClientProtocolVersion()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasMethodName()) { + + return false; + } + if (!hasDeclaringClassProtocolName()) { + + return false; + } + if (!hasClientProtocolVersion()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string methodName = 1; + private java.lang.Object methodName_ = ""; + /** + * required string methodName = 1; + * + *
+       ** Name of the RPC method
+       * 
+ */ + public boolean hasMethodName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string methodName = 1; + * + *
+       ** Name of the RPC method
+       * 
+ */ + public java.lang.String getMethodName() { + java.lang.Object ref = methodName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + methodName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string methodName = 1; + * + *
+       ** Name of the RPC method
+       * 
+ */ + public com.google.protobuf.ByteString + getMethodNameBytes() { + java.lang.Object ref = methodName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + methodName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string methodName = 1; + * + *
+       ** Name of the RPC method
+       * 
+ */ + public Builder setMethodName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + methodName_ = value; + onChanged(); + return this; + } + /** + * required string methodName = 1; + * + *
+       ** Name of the RPC method
+       * 
+ */ + public Builder clearMethodName() { + bitField0_ = (bitField0_ & ~0x00000001); + methodName_ = getDefaultInstance().getMethodName(); + onChanged(); + return this; + } + /** + * required string methodName = 1; + * + *
+       ** Name of the RPC method
+       * 
+ */ + public Builder setMethodNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + methodName_ = value; + onChanged(); + return this; + } + + // required string declaringClassProtocolName = 2; + private java.lang.Object declaringClassProtocolName_ = ""; + /** + * required string declaringClassProtocolName = 2; + * + *
+       **
+       * RPCs for a particular interface (i.e. protocol) are done using an
+       * IPC connection that is set up using rpcProxy.
+       * The rpcProxy has a declared protocol name that is
+       * sent from client to server at connection time.
+       *
+       * Each RPC call also sends a protocol name
+       * (called declaringClassProtocolName). This name is usually the same
+       * as the connection protocol name, except in some cases.
+       * For example, metaProtocols such as ProtocolInfoProto, which get metainfo
+       * about the protocol, reuse the connection but need to indicate that
+       * the actual protocol is different (i.e. the protocol is
+       * ProtocolInfoProto) since they reuse the connection; in this case
+       * the declaringClassProtocolName field is set to ProtocolInfoProto.
+       * 
+ */ + public boolean hasDeclaringClassProtocolName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string declaringClassProtocolName = 2; + * + *
+       **
+       * RPCs for a particular interface (i.e. protocol) are done using an
+       * IPC connection that is set up using rpcProxy.
+       * The rpcProxy has a declared protocol name that is
+       * sent from client to server at connection time.
+       *
+       * Each RPC call also sends a protocol name
+       * (called declaringClassProtocolName). This name is usually the same
+       * as the connection protocol name, except in some cases.
+       * For example, metaProtocols such as ProtocolInfoProto, which get metainfo
+       * about the protocol, reuse the connection but need to indicate that
+       * the actual protocol is different (i.e. the protocol is
+       * ProtocolInfoProto) since they reuse the connection; in this case
+       * the declaringClassProtocolName field is set to ProtocolInfoProto.
+       * 
+ */ + public java.lang.String getDeclaringClassProtocolName() { + java.lang.Object ref = declaringClassProtocolName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + declaringClassProtocolName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string declaringClassProtocolName = 2; + * + *
+       **
+       * RPCs for a particular interface (i.e. protocol) are done using an
+       * IPC connection that is set up using rpcProxy.
+       * The rpcProxy has a declared protocol name that is
+       * sent from client to server at connection time.
+       *
+       * Each RPC call also sends a protocol name
+       * (called declaringClassProtocolName). This name is usually the same
+       * as the connection protocol name, except in some cases.
+       * For example, metaProtocols such as ProtocolInfoProto, which get metainfo
+       * about the protocol, reuse the connection but need to indicate that
+       * the actual protocol is different (i.e. the protocol is
+       * ProtocolInfoProto) since they reuse the connection; in this case
+       * the declaringClassProtocolName field is set to ProtocolInfoProto.
+       * 
+ */ + public com.google.protobuf.ByteString + getDeclaringClassProtocolNameBytes() { + java.lang.Object ref = declaringClassProtocolName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + declaringClassProtocolName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string declaringClassProtocolName = 2; + * + *
+       **
+       * RPCs for a particular interface (i.e. protocol) are done using an
+       * IPC connection that is set up using rpcProxy.
+       * The rpcProxy has a declared protocol name that is
+       * sent from client to server at connection time.
+       *
+       * Each RPC call also sends a protocol name
+       * (called declaringClassProtocolName). This name is usually the same
+       * as the connection protocol name, except in some cases.
+       * For example, metaProtocols such as ProtocolInfoProto, which get metainfo
+       * about the protocol, reuse the connection but need to indicate that
+       * the actual protocol is different (i.e. the protocol is
+       * ProtocolInfoProto) since they reuse the connection; in this case
+       * the declaringClassProtocolName field is set to ProtocolInfoProto.
+       * 
+ */ + public Builder setDeclaringClassProtocolName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + declaringClassProtocolName_ = value; + onChanged(); + return this; + } + /** + * required string declaringClassProtocolName = 2; + * + *
+       **
+       * RPCs for a particular interface (i.e. protocol) are done using an
+       * IPC connection that is set up using rpcProxy.
+       * The rpcProxy has a declared protocol name that is
+       * sent from client to server at connection time.
+       *
+       * Each RPC call also sends a protocol name
+       * (called declaringClassProtocolName). This name is usually the same
+       * as the connection protocol name, except in some cases.
+       * For example, metaProtocols such as ProtocolInfoProto, which get metainfo
+       * about the protocol, reuse the connection but need to indicate that
+       * the actual protocol is different (i.e. the protocol is
+       * ProtocolInfoProto) since they reuse the connection; in this case
+       * the declaringClassProtocolName field is set to ProtocolInfoProto.
+       * 
+ */ + public Builder clearDeclaringClassProtocolName() { + bitField0_ = (bitField0_ & ~0x00000002); + declaringClassProtocolName_ = getDefaultInstance().getDeclaringClassProtocolName(); + onChanged(); + return this; + } + /** + * required string declaringClassProtocolName = 2; + * + *
+       **
+       * RPCs for a particular interface (i.e. protocol) are done using an
+       * IPC connection that is set up using rpcProxy.
+       * The rpcProxy has a declared protocol name that is
+       * sent from client to server at connection time.
+       *
+       * Each RPC call also sends a protocol name
+       * (called declaringClassProtocolName). This name is usually the same
+       * as the connection protocol name, except in some cases.
+       * For example, metaProtocols such as ProtocolInfoProto, which get metainfo
+       * about the protocol, reuse the connection but need to indicate that
+       * the actual protocol is different (i.e. the protocol is
+       * ProtocolInfoProto) since they reuse the connection; in this case
+       * the declaringClassProtocolName field is set to ProtocolInfoProto.
+       * 
+ */ + public Builder setDeclaringClassProtocolNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + declaringClassProtocolName_ = value; + onChanged(); + return this; + } + + // required uint64 clientProtocolVersion = 3; + private long clientProtocolVersion_ ; + /** + * required uint64 clientProtocolVersion = 3; + * + *
+       ** protocol version of class declaring the called method
+       * 
+ */ + public boolean hasClientProtocolVersion() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required uint64 clientProtocolVersion = 3; + * + *
+       ** protocol version of class declaring the called method
+       * 
+ */ + public long getClientProtocolVersion() { + return clientProtocolVersion_; + } + /** + * required uint64 clientProtocolVersion = 3; + * + *
+       ** protocol version of class declaring the called method
+       * 
+ */ + public Builder setClientProtocolVersion(long value) { + bitField0_ |= 0x00000004; + clientProtocolVersion_ = value; + onChanged(); + return this; + } + /** + * required uint64 clientProtocolVersion = 3; + * + *
+       ** protocol version of class declaring the called method
+       * 
+ */ + public Builder clearClientProtocolVersion() { + bitField0_ = (bitField0_ & ~0x00000004); + clientProtocolVersion_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hadoop.common.RequestHeaderProto) + } + + static { + defaultInstance = new RequestHeaderProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hadoop.common.RequestHeaderProto) + } + + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hadoop_common_RequestHeaderProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hadoop_common_RequestHeaderProto_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n\027ProtobufRpcEngine.proto\022\rhadoop.common" + + "\"k\n\022RequestHeaderProto\022\022\n\nmethodName\030\001 \002" + + "(\t\022\"\n\032declaringClassProtocolName\030\002 \002(\t\022\035" + + "\n\025clientProtocolVersion\030\003 \002(\004B<\n\036org.apa" + + "che.hadoop.ipc.protobufB\027ProtobufRpcEngi" + + "neProtos\240\001\001" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + internal_static_hadoop_common_RequestHeaderProto_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_hadoop_common_RequestHeaderProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hadoop_common_RequestHeaderProto_descriptor, + new java.lang.String[] { "MethodName", "DeclaringClassProtocolName", "ClientProtocolVersion", }); + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + }, assigner); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java index 9751a9b66945c..b93581e8a7b98 100755 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java @@ -3869,6 +3869,7 @@ public Map getValByRegex(String regex) { Pattern p = Pattern.compile(regex); Map result = new HashMap(); + List resultKeys = new ArrayList<>(); Matcher m; for(Map.Entry item: getProps().entrySet()) { @@ -3876,11 +3877,12 @@ public Map getValByRegex(String regex) { item.getValue() instanceof String) { m = p.matcher((String)item.getKey()); if(m.find()) { // match - result.put((String) item.getKey(), - substituteVars(getProps().getProperty((String) item.getKey()))); + resultKeys.add((String) item.getKey()); } } } + resultKeys.forEach(item -> + result.put(item, substituteVars(getProps().getProperty(item)))); return result; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java index 
1df68b647c99a..ec346b482a452 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java @@ -23,7 +23,6 @@ import java.lang.reflect.InvocationTargetException; import java.net.URI; import java.net.URISyntaxException; -import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.EnumSet; @@ -1032,7 +1031,7 @@ public String getCanonicalServiceName() { */ @InterfaceAudience.LimitedPrivate( { "HDFS", "MapReduce" }) public List> getDelegationTokens(String renewer) throws IOException { - return new ArrayList>(0); + return Collections.emptyList(); } /** @@ -1383,4 +1382,34 @@ public boolean hasPathCapability(final Path path, return false; } } + + /** + * Create a multipart uploader. + * @param basePath file path under which all files are uploaded + * @return a MultipartUploaderBuilder object to build the uploader + * @throws IOException if some early checks cause IO failures. + * @throws UnsupportedOperationException if support is checked early. + */ + @InterfaceStability.Unstable + public MultipartUploaderBuilder createMultipartUploader(Path basePath) + throws IOException { + methodNotSupported(); + return null; + } + + /** + * Helper method that throws an {@link UnsupportedOperationException} for the + * current {@link FileSystem} method being called. + */ + protected final void methodNotSupported() { + // The order of the stacktrace elements is (from top to bottom): + // - java.lang.Thread.getStackTrace + // - org.apache.hadoop.fs.FileSystem.methodNotSupported + // - + // therefore, to find out the current method name, we use the element at + // index 2. + String name = Thread.currentThread().getStackTrace()[2].getMethodName(); + throw new UnsupportedOperationException(getClass().getCanonicalName() + + " does not support method " + name); + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonPathCapabilities.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonPathCapabilities.java index fb46ef81e36fa..539b3e27c0351 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonPathCapabilities.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonPathCapabilities.java @@ -131,4 +131,12 @@ private CommonPathCapabilities() { @InterfaceStability.Unstable public static final String FS_EXPERIMENTAL_BATCH_LISTING = "fs.capability.batch.listing"; + + /** + * Does the store support multipart uploading? + * Value: {@value}. 
+ */ + public static final String FS_MULTIPART_UPLOADER = + "fs.capability.multipart.uploader"; + } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java index ba0064f0813d3..e5f4ef3809f18 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java @@ -66,6 +66,7 @@ import org.apache.hadoop.util.ShutdownHookManager; import com.google.common.base.Preconditions; +import com.google.common.annotations.VisibleForTesting; import org.apache.htrace.core.Tracer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -507,10 +508,9 @@ public static FileContext getLocalFSFileContext(final Configuration aConf) return getFileContext(FsConstants.LOCAL_FS_URI, aConf); } - /* This method is needed for tests. */ + @VisibleForTesting @InterfaceAudience.Private - @InterfaceStability.Unstable /* return type will change to AFS once - HADOOP-6223 is completed */ + @InterfaceStability.Unstable public AbstractFileSystem getDefaultFileSystem() { return defaultFS; } @@ -2957,4 +2957,31 @@ public boolean hasPathCapability(Path path, String capability) (fs, p) -> fs.hasPathCapability(p, capability)); } + /** + * Return a set of server default configuration values based on path. + * @param path path to fetch server defaults + * @return server default configuration values for path + * @throws IOException an I/O error occurred + */ + public FsServerDefaults getServerDefaults(final Path path) + throws IOException { + return FsLinkResolution.resolve(this, + fixRelativePart(path), + (fs, p) -> fs.getServerDefaults(p)); + } + + /** + * Create a multipart uploader. + * @param basePath file path under which all files are uploaded + * @return a MultipartUploaderBuilder object to build the uploader + * @throws IOException if some early checks cause IO failures. + * @throws UnsupportedOperationException if support is checked early. + */ + @InterfaceStability.Unstable + public MultipartUploaderBuilder createMultipartUploader(Path basePath) + throws IOException { + return FsLinkResolution.resolve(this, + fixRelativePart(basePath), + (fs, p) -> fs.createMultipartUploader(p)); + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java index abb31ed869591..ab5040486dffc 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java @@ -132,22 +132,35 @@ * New methods may be marked as Unstable or Evolving for their initial release, * as a warning that they are new and may change based on the * experience of use in applications. + *
<p></p>
* Important note for developers - * - * If you're making changes here to the public API or protected methods, + *
<p></p>
+ * If you are making changes here to the public API or protected methods, * you must review the following subclasses and make sure that * they are filtering/passing through new methods as appropriate. + *
<p></p>
* - * {@link FilterFileSystem}: methods are passed through. + * {@link FilterFileSystem}: methods are passed through. If not, + * then {@code TestFilterFileSystem.MustNotImplement} must be + * updated with the unsupported interface. + * Furthermore, if the new API's support is probed for via + * {@link #hasPathCapability(Path, String)} then + * {@link FilterFileSystem#hasPathCapability(Path, String)} + * must return false, always. + *
<p></p>
* {@link ChecksumFileSystem}: checksums are created and * verified. + *
<p></p>
* {@code TestHarFileSystem} will need its {@code MustNotImplement} * interface updated. + *
<p></p>
* * There are some external places your changes will break things. * Do co-ordinate changes here. + *
<p></p>
* * HBase: HBoss + *
<p></p>
* Hive: HiveShim23 * {@code shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java} * @@ -2695,7 +2708,7 @@ static void checkAccessPermissions(FileStatus stat, FsAction mode) if (perm.getUserAction().implies(mode)) { return; } - } else if (ugi.getGroups().contains(stat.getGroup())) { + } else if (ugi.getGroupsSet().contains(stat.getGroup())) { if (perm.getGroupAction().implies(mode)) { return; } @@ -4644,4 +4657,17 @@ public CompletableFuture build() throws IOException { } + /** + * Create a multipart uploader. + * @param basePath file path under which all files are uploaded + * @return a MultipartUploaderBuilder object to build the uploader + * @throws IOException if some early checks cause IO failures. + * @throws UnsupportedOperationException if support is checked early. + */ + @InterfaceStability.Unstable + public MultipartUploaderBuilder createMultipartUploader(Path basePath) + throws IOException { + methodNotSupported(); + return null; + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java index cf12ea3898a7f..42410974db17c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java @@ -41,6 +41,8 @@ import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.util.Progressable; +import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs; + /**************************************************************** * A FilterFileSystem contains * some other file system, which it uses as @@ -728,7 +730,16 @@ protected CompletableFuture openFileWithOptions( @Override public boolean hasPathCapability(final Path path, final String capability) throws IOException { - return fs.hasPathCapability(path, capability); + switch (validatePathCapabilityArgs(makeQualified(path), capability)) { + case CommonPathCapabilities.FS_MULTIPART_UPLOADER: + case CommonPathCapabilities.FS_EXPERIMENTAL_BATCH_LISTING: + // operations known to be unsupported, irrespective of what + // the wrapped class implements. + return false; + default: + // the feature is not implemented. 
+ return fs.hasPathCapability(path, capability); + } } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java index e197506edc88b..27e75d8a25df6 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java @@ -448,4 +448,10 @@ public boolean hasPathCapability(final Path path, throws IOException { return myFs.hasPathCapability(path, capability); } + + @Override + public MultipartUploaderBuilder createMultipartUploader(final Path basePath) + throws IOException { + return myFs.createMultipartUploader(basePath); + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsConstants.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsConstants.java index 07c16b22358c1..344048f0ceeb1 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsConstants.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsConstants.java @@ -44,4 +44,6 @@ public interface FsConstants { public static final String VIEWFS_SCHEME = "viewfs"; String FS_VIEWFS_OVERLOAD_SCHEME_TARGET_FS_IMPL_PATTERN = "fs.viewfs.overload.scheme.target.%s.impl"; + String VIEWFS_TYPE = "viewfs"; + String VIEWFSOS_TYPE = "viewfsOverloadScheme"; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShellPermissions.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShellPermissions.java index 76e379c51f605..1a8a77723176e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShellPermissions.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShellPermissions.java @@ -112,7 +112,7 @@ protected void processPath(PathData item) throws IOException { // used by chown/chgrp static private String allowedChars = Shell.WINDOWS ? "[-_./@a-zA-Z0-9 ]" : - "[-_./@a-zA-Z0-9]"; + "[-+_./@a-zA-Z0-9]"; /** * Used to change owner and/or group of files diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/InternalOperations.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/InternalOperations.java new file mode 100644 index 0000000000000..2db33eead9288 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/InternalOperations.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
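The FilterFileSystem change above enforces the pass-through rule from the FileSystem developer note: a filter must never advertise a capability whose operation it does not relay, even when the wrapped store would report true. A hypothetical subclass (invented name; the constants are the ones added in this patch) showing the same masking pattern for any other delegating filesystem:

```java
import java.io.IOException;

import org.apache.hadoop.fs.CommonPathCapabilities;
import org.apache.hadoop.fs.FilterFileSystem;
import org.apache.hadoop.fs.Path;

public class CapabilityMaskingFileSystem extends FilterFileSystem {

  @Override
  public boolean hasPathCapability(Path path, String capability)
      throws IOException {
    switch (capability) {
    case CommonPathCapabilities.FS_MULTIPART_UPLOADER:
    case CommonPathCapabilities.FS_EXPERIMENTAL_BATCH_LISTING:
      // operations this filter does not relay: never advertise them
      return false;
    default:
      return super.hasPathCapability(path, capability);
    }
  }
}
```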
+ */ + +package org.apache.hadoop.fs; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; + + +/** + * This class allows access to package-scoped operations from classes + * in org.apache.hadoop.fs.impl and other file system implementations + * in the hadoop modules. + * This is absolutely not for use by any other application or library. + */ +@InterfaceAudience.Private +public class InternalOperations { + + @SuppressWarnings("deprecation") // rename w/ OVERWRITE + public void rename(FileSystem fs, final Path src, final Path dst, + final Options.Rename...options) throws IOException { + fs.rename(src, dst, options); + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploader.java index 7ed987eed90dd..89848dc29ded0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploader.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploader.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,45 +15,26 @@ * See the License for the specific language governing permissions and * limitations under the License. */ + package org.apache.hadoop.fs; import java.io.Closeable; import java.io.IOException; import java.io.InputStream; import java.util.Map; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import java.util.concurrent.CompletableFuture; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import static com.google.common.base.Preconditions.checkArgument; - /** * MultipartUploader is an interface for copying files multipart and across - * multiple nodes. Users should: - *
<ol>
- *   <li>Initialize an upload.</li>
- *   <li>Upload parts in any order.</li>
- *   <li>Complete the upload in order to have it materialize in the destination
- * FS.</li>
- * </ol>
+ * multiple nodes. */ -@InterfaceAudience.Private +@InterfaceAudience.Public @InterfaceStability.Unstable -public abstract class MultipartUploader implements Closeable { - public static final Logger LOG = - LoggerFactory.getLogger(MultipartUploader.class); +public interface MultipartUploader extends Closeable { - /** - * Perform any cleanup. - * The upload is not required to support any operations after this. - * @throws IOException problems on close. - */ - @Override - public void close() throws IOException { - } /** * Initialize a multipart upload. @@ -61,94 +42,64 @@ public void close() throws IOException { * @return unique identifier associating part uploads. * @throws IOException IO failure */ - public abstract UploadHandle initialize(Path filePath) throws IOException; + CompletableFuture<UploadHandle> startUpload(Path filePath) + throws IOException; /** * Put part as part of a multipart upload. * It is possible to have parts uploaded in any order (or in parallel). - * @param filePath Target path for upload (same as {@link #initialize(Path)}). + * @param uploadId Identifier from {@link #startUpload(Path)}. + * @param partNumber Index of the part relative to others. + * @param filePath Target path for upload (as {@link #startUpload(Path)}). * @param inputStream Data for this part. Implementations MUST close this * stream after reading in the data. - * @param partNumber Index of the part relative to others. - * @param uploadId Identifier from {@link #initialize(Path)}. * @param lengthInBytes Target length to read from the stream. * @return unique PartHandle identifier for the uploaded part. * @throws IOException IO failure */ - public abstract PartHandle putPart(Path filePath, InputStream inputStream, - int partNumber, UploadHandle uploadId, long lengthInBytes) + CompletableFuture<PartHandle> putPart( + UploadHandle uploadId, + int partNumber, + Path filePath, + InputStream inputStream, + long lengthInBytes) throws IOException; /** * Complete a multipart upload. - * @param filePath Target path for upload (same as {@link #initialize(Path)}). + * @param uploadId Identifier from {@link #startUpload(Path)}. + * @param filePath Target path for upload (as {@link #startUpload(Path)}). * @param handles non-empty map of part number to part handle. - * from {@link #putPart(Path, InputStream, int, UploadHandle, long)}. - * @param multipartUploadId Identifier from {@link #initialize(Path)}. + * from {@link #putPart(UploadHandle, int, Path, InputStream, long)}. * @return unique PathHandle identifier for the uploaded file. * @throws IOException IO failure */ - public abstract PathHandle complete(Path filePath, - Map<Integer, PartHandle> handles, - UploadHandle multipartUploadId) + CompletableFuture<PathHandle> complete( + UploadHandle uploadId, + Path filePath, + Map<Integer, PartHandle> handles) throws IOException; /** * Aborts a multipart upload. - * @param filePath Target path for upload (same as {@link #initialize(Path)}). - * @param multipartUploadId Identifier from {@link #initialize(Path)}. + * @param uploadId Identifier from {@link #startUpload(Path)}. + * @param filePath Target path for upload (same as {@link #startUpload(Path)}). + * @throws IOException IO failure + * @return a future; the operation will have completed */ - public abstract void abort(Path filePath, UploadHandle multipartUploadId) + CompletableFuture<Void> abort(UploadHandle uploadId, Path filePath) throws IOException; /** - * Utility method to validate uploadIDs.
- * @param uploadId Upload ID - * @throws IllegalArgumentException invalid ID - */ - protected void checkUploadId(byte[] uploadId) - throws IllegalArgumentException { - checkArgument(uploadId != null, "null uploadId"); - checkArgument(uploadId.length > 0, - "Empty UploadId is not valid"); - } - - /** - * Utility method to validate partHandles. - * @param partHandles handles - * @throws IllegalArgumentException if the parts are invalid + * Best-effort attempt to abort multipart uploads under a path. + * Not all implementations support this, and those which do may + * be vulnerable to eventually consistent listings of current uploads; + * some may be missed. + * @param path path to abort uploads under. + * @return a future to the number of entries aborted; + * -1 if aborting is unsupported + * @throws IOException IO failure */ - protected void checkPartHandles(Map<Integer, PartHandle> partHandles) { - checkArgument(!partHandles.isEmpty(), - "Empty upload"); - partHandles.keySet() - .stream() - .forEach(key -> - checkArgument(key > 0, - "Invalid part handle index %s", key)); - } + CompletableFuture<Integer> abortUploadsUnderPath(Path path) throws IOException; - /** - * Check all the arguments to the - * {@link #putPart(Path, InputStream, int, UploadHandle, long)} operation. - * @param filePath Target path for upload (same as {@link #initialize(Path)}). - * @param inputStream Data for this part. Implementations MUST close this - * stream after reading in the data. - * @param partNumber Index of the part relative to others. - * @param uploadId Identifier from {@link #initialize(Path)}. - * @param lengthInBytes Target length to read from the stream. - * @throws IllegalArgumentException invalid argument - */ - protected void checkPutArguments(Path filePath, - InputStream inputStream, - int partNumber, - UploadHandle uploadId, - long lengthInBytes) throws IllegalArgumentException { - checkArgument(filePath != null, "null filePath"); - checkArgument(inputStream != null, "null inputStream"); - checkArgument(partNumber > 0, "Invalid part number: %d", partNumber); - checkArgument(uploadId != null, "null uploadId"); - checkArgument(lengthInBytes >= 0, "Invalid part length: %d", lengthInBytes); - } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploaderBuilder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploaderBuilder.java new file mode 100644 index 0000000000000..381bfaa07f6d1 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploaderBuilder.java @@ -0,0 +1,83 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
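Since every operation on the new interface returns a CompletableFuture, an upload is a chain of asynchronous calls ending in complete(). A hedged end-to-end sketch; it assumes the target filesystem actually implements the createMultipartUploader() API added in this patch (stores without support throw UnsupportedOperationException), and the path and part contents are illustrative:

```java
import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.MultipartUploader;
import org.apache.hadoop.fs.PartHandle;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathHandle;
import org.apache.hadoop.fs.UploadHandle;

public class MultipartUploadDemo {
  public static void main(String[] args) throws Exception {
    Path dest = new Path("/tmp/multipart-demo/target.bin");
    FileSystem fs = dest.getFileSystem(new Configuration());

    try (MultipartUploader uploader =
        fs.createMultipartUploader(dest.getParent()).build()) {
      UploadHandle upload = uploader.startUpload(dest).get();

      // parts may be uploaded in any order, or in parallel
      Map<Integer, PartHandle> parts = new HashMap<>();
      byte[] data = "hello, multipart".getBytes(StandardCharsets.UTF_8);
      parts.put(1, uploader.putPart(upload, 1, dest,
          new ByteArrayInputStream(data), data.length).get());

      // nothing is visible at dest until complete() materializes the file
      PathHandle committed = uploader.complete(upload, dest, parts).get();
      System.out.println("upload committed: " + committed);
    }
  }
}
```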
+ */ + +package org.apache.hadoop.fs; + +import javax.annotation.Nonnull; +import java.io.IOException; + +import org.apache.hadoop.fs.permission.FsPermission; + +/** + * Builder interface for Multipart uploaders. + * @param <S> type of uploader built. + * @param <B> subtype of this builder. + */ +public interface MultipartUploaderBuilder<S extends MultipartUploader, B extends MultipartUploaderBuilder<S, B>> + extends FSBuilder<S, B> { + + /** + * Set permission for the file. + */ + B permission(@Nonnull FsPermission perm); + + /** + * Set the size of the buffer to be used. + */ + B bufferSize(int bufSize); + + /** + * Set replication factor. + */ + B replication(short replica); + + /** + * Set block size. + */ + B blockSize(long blkSize); + + /** + * Create a new file. + */ + B create(); + + /** + * Set to true to overwrite the existing file. + * If set to false, an exception will be thrown when calling {@link #build()} + * if the file exists. + */ + B overwrite(boolean overwrite); + + /** + * Append to an existing file (optional operation). + */ + B append(); + + /** + * Set checksum opt. + */ + B checksumOpt(@Nonnull Options.ChecksumOpt chksumOpt); + + /** + * Build the uploader against the file system. + * + * @throws IllegalArgumentException if the parameters are not valid. + * @throws IOException on errors when file system creates or appends the file. + */ + S build() throws IllegalArgumentException, IOException; +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploaderFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploaderFactory.java deleted file mode 100644 index e35b6bf18bbd6..0000000000000 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploaderFactory.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.fs; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.Iterator; -import java.util.ServiceLoader; - -/** - * {@link ServiceLoader}-driven uploader API for storage services supporting - * multipart uploads. - */ -@InterfaceAudience.Private -@InterfaceStability.Unstable -public abstract class MultipartUploaderFactory { - public static final Logger LOG = - LoggerFactory.getLogger(MultipartUploaderFactory.class); - - /** - * Multipart Uploaders listed as services. - */ - private static ServiceLoader<MultipartUploaderFactory> serviceLoader = - ServiceLoader.load(MultipartUploaderFactory.class, - MultipartUploaderFactory.class.getClassLoader()); - - // Iterate through the serviceLoader to avoid lazy loading.
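With the ServiceLoader-based factory below removed, the builder is now the only way to obtain an uploader. A sketch of the fluent configuration it enables; the option values are illustrative, and the raw builder type mirrors the createMultipartUploader() signature added to FileSystem in this patch:

```java
import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.MultipartUploader;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class UploaderBuilderDemo {

  // Builds an uploader for everything under basePath; each setter returns
  // the builder itself, and build() performs the final validation.
  public static MultipartUploader newUploader(FileSystem fs, Path basePath)
      throws IOException {
    return fs.createMultipartUploader(basePath)
        .permission(FsPermission.getFileDefault())
        .bufferSize(64 * 1024)
        .blockSize(128L * 1024 * 1024)
        .overwrite(true)
        .build();
  }
}
```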
- // Lazy loading would require synchronization in concurrent use cases. - static { - Iterator iterServices = serviceLoader.iterator(); - while (iterServices.hasNext()) { - iterServices.next(); - } - } - - /** - * Get the multipart loader for a specific filesystem. - * @param fs filesystem - * @param conf configuration - * @return an uploader, or null if one was found. - * @throws IOException failure during the creation process. - */ - public static MultipartUploader get(FileSystem fs, Configuration conf) - throws IOException { - MultipartUploader mpu = null; - for (MultipartUploaderFactory factory : serviceLoader) { - mpu = factory.createMultipartUploader(fs, conf); - if (mpu != null) { - break; - } - } - return mpu; - } - - protected abstract MultipartUploader createMultipartUploader(FileSystem fs, - Configuration conf) throws IOException; -} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java index cf2210575da15..72eeb99a4ea5d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java @@ -64,6 +64,7 @@ public class RawLocalFileSystem extends FileSystem { static final URI NAME = URI.create("file:///"); private Path workingDir; + private long defaultBlockSize; // Temporary workaround for HADOOP-9652. private static boolean useDeprecatedFileStatus = true; @@ -100,6 +101,7 @@ public File pathToFile(Path path) { public void initialize(URI uri, Configuration conf) throws IOException { super.initialize(uri, conf); setConf(conf); + defaultBlockSize = getDefaultBlockSize(new Path(uri)); } /******************************************************* @@ -518,7 +520,12 @@ public FileStatus[] listStatus(Path f) throws IOException { } return new FileStatus[] { new DeprecatedRawLocalFileStatus(localf, - getDefaultBlockSize(f), this) }; + defaultBlockSize, this) }; + } + + @Override + public boolean exists(Path f) throws IOException { + return pathToFile(f).exists(); } protected boolean mkOneDir(File p2f) throws IOException { @@ -663,7 +670,7 @@ private FileStatus deprecatedGetFileStatus(Path f) throws IOException { File path = pathToFile(f); if (path.exists()) { return new DeprecatedRawLocalFileStatus(pathToFile(f), - getDefaultBlockSize(f), this); + defaultBlockSize, this); } else { throw new FileNotFoundException("File " + f + " does not exist"); } @@ -1051,7 +1058,7 @@ private FileStatus deprecatedGetFileLinkStatusInternal(final Path f) private FileStatus getNativeFileLinkStatus(final Path f, boolean dereference) throws IOException { checkPath(f); - Stat stat = new Stat(f, getDefaultBlockSize(f), dereference, this); + Stat stat = new Stat(f, defaultBlockSize, dereference, this); FileStatus status = stat.getFileStatus(); return status; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Stat.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Stat.java index 5e80a140175e6..f6c2f2af1c9ba 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Stat.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Stat.java @@ -20,8 +20,7 @@ import java.io.BufferedReader; import java.io.FileNotFoundException; import java.io.IOException; -import java.util.HashMap; -import java.util.Map; +import 
java.util.Collections; import java.util.NoSuchElementException; import java.util.StringTokenizer; @@ -65,9 +64,7 @@ public Stat(Path path, long blockSize, boolean deref, FileSystem fs) this.blockSize = blockSize; this.dereference = deref; // LANG = C setting - Map env = new HashMap(); - env.put("LANG", "C"); - setEnvironment(env); + setEnvironment(Collections.singletonMap("LANG", "C")); } public FileStatus getFileStatus() throws IOException { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/AbstractMultipartUploader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/AbstractMultipartUploader.java new file mode 100644 index 0000000000000..d8b7fe0744087 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/AbstractMultipartUploader.java @@ -0,0 +1,142 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.impl; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.CompletableFuture; + +import com.google.common.base.Preconditions; + +import org.apache.hadoop.fs.MultipartUploader; +import org.apache.hadoop.fs.PartHandle; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.UploadHandle; + +import static com.google.common.base.Preconditions.checkArgument; + +/** + * Standard base class for Multipart Uploaders. + */ +public abstract class AbstractMultipartUploader implements MultipartUploader { + + /** + * Base path of upload. + */ + private final Path basePath; + + /** + * Instantiate. + * @param basePath base path + */ + protected AbstractMultipartUploader(final Path basePath) { + this.basePath = Objects.requireNonNull(basePath, "null path"); + } + + /** + * Perform any cleanup. + * The upload is not required to support any operations after this. + * @throws IOException problems on close. + */ + @Override + public void close() throws IOException { + } + + protected Path getBasePath() { + return basePath; + } + + /** + * Validate a path. + * @param path path to check. + */ + protected void checkPath(Path path) { + Objects.requireNonNull(path, "null path"); + Preconditions.checkArgument(path.toString().startsWith(basePath.toString()), + "Path %s is not under %s", path, basePath); + } + + /** + * Utility method to validate uploadIDs. + * @param uploadId Upload ID + * @throws IllegalArgumentException invalid ID + */ + protected void checkUploadId(byte[] uploadId) + throws IllegalArgumentException { + checkArgument(uploadId != null, "null uploadId"); + checkArgument(uploadId.length > 0, + "Empty UploadId is not valid"); + } + + /** + * Utility method to validate partHandles. 
+ * @param partHandles handles + * @throws IllegalArgumentException if the parts are invalid + */ + protected void checkPartHandles(Map<Integer, PartHandle> partHandles) { + checkArgument(!partHandles.isEmpty(), + "Empty upload"); + partHandles.keySet() + .stream() + .forEach(key -> + checkArgument(key > 0, + "Invalid part handle index %s", key)); + } + + /** + * Check all the arguments to the + * {@link MultipartUploader#putPart(UploadHandle, int, Path, InputStream, long)} + * operation. + * @param filePath Target path for upload (as {@link #startUpload(Path)}). + * @param inputStream Data for this part. Implementations MUST close this + * stream after reading in the data. + * @param partNumber Index of the part relative to others. + * @param uploadId Identifier from {@link #startUpload(Path)}. + * @param lengthInBytes Target length to read from the stream. + * @throws IllegalArgumentException invalid argument + */ + protected void checkPutArguments(Path filePath, + InputStream inputStream, + int partNumber, + UploadHandle uploadId, + long lengthInBytes) throws IllegalArgumentException { + checkPath(filePath); + checkArgument(inputStream != null, "null inputStream"); + checkArgument(partNumber > 0, "Invalid part number: %s", partNumber); + checkArgument(uploadId != null, "null uploadId"); + checkArgument(lengthInBytes >= 0, "Invalid part length: %s", lengthInBytes); + } + + /** + * {@inheritDoc}. + * @param path path to abort uploads under. + * @return a future to -1. + * @throws IOException IO failure + */ + public CompletableFuture<Integer> abortUploadsUnderPath(Path path) + throws IOException { + checkPath(path); + CompletableFuture<Integer> f = new CompletableFuture<>(); + f.complete(-1); + return f; + } + +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FileSystemMultipartUploader.java similarity index 52% rename from hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java rename to hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FileSystemMultipartUploader.java index b77c244220a9e..ae0def0e378d4 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FileSystemMultipartUploader.java @@ -14,24 +14,42 @@ * See the License for the specific language governing permissions and * limitations under the License.
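The validation helpers gathered into this base class fail eagerly, before any I/O starts. A standalone sketch of the same precondition style (invented class name, not Hadoop code); note that Guava's checkArgument() expands only %s placeholders in its message template, which is why the templates above use %s:

```java
import static com.google.common.base.Preconditions.checkArgument;

public final class UploadPreconditionsDemo {

  private UploadPreconditionsDemo() {
  }

  static void checkUploadId(byte[] uploadId) {
    checkArgument(uploadId != null, "null uploadId");
    checkArgument(uploadId.length > 0, "Empty UploadId is not valid");
  }

  static void checkPartNumber(int partNumber) {
    // %s is substituted by Guava; %d would be left in the message verbatim
    checkArgument(partNumber > 0, "Invalid part number: %s", partNumber);
  }

  public static void main(String[] args) {
    checkUploadId(new byte[] {1, 2, 3}); // passes
    checkPartNumber(1);                  // passes
    checkPartNumber(0);                  // throws IllegalArgumentException
  }
}
```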
*/ -package org.apache.hadoop.fs; + +package org.apache.hadoop.fs.impl; import java.io.IOException; import java.io.InputStream; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Comparator; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.UUID; +import java.util.concurrent.CompletableFuture; import java.util.stream.Collectors; import com.google.common.base.Charsets; +import com.google.common.base.Preconditions; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.commons.compress.utils.IOUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.BBPartHandle; +import org.apache.hadoop.fs.BBUploadHandle; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FSDataOutputStreamBuilder; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.InternalOperations; +import org.apache.hadoop.fs.Options; +import org.apache.hadoop.fs.PartHandle; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathHandle; +import org.apache.hadoop.fs.UploadHandle; import org.apache.hadoop.fs.permission.FsPermission; import static org.apache.hadoop.fs.Path.mergePaths; @@ -50,40 +68,82 @@ */ @InterfaceAudience.Private @InterfaceStability.Unstable -public class FileSystemMultipartUploader extends MultipartUploader { +public class FileSystemMultipartUploader extends AbstractMultipartUploader { + + private static final Logger LOG = LoggerFactory.getLogger( + FileSystemMultipartUploader.class); private final FileSystem fs; - public FileSystemMultipartUploader(FileSystem fs) { + private final FileSystemMultipartUploaderBuilder builder; + + private final FsPermission permission; + + private final long blockSize; + + private final Options.ChecksumOpt checksumOpt; + + public FileSystemMultipartUploader( + final FileSystemMultipartUploaderBuilder builder, + FileSystem fs) { + super(builder.getPath()); + this.builder = builder; this.fs = fs; + blockSize = builder.getBlockSize(); + checksumOpt = builder.getChecksumOpt(); + permission = builder.getPermission(); } @Override - public UploadHandle initialize(Path filePath) throws IOException { - Path collectorPath = createCollectorPath(filePath); - fs.mkdirs(collectorPath, FsPermission.getDirDefault()); + public CompletableFuture startUpload(Path filePath) + throws IOException { + checkPath(filePath); + return FutureIOSupport.eval(() -> { + Path collectorPath = createCollectorPath(filePath); + fs.mkdirs(collectorPath, FsPermission.getDirDefault()); - ByteBuffer byteBuffer = ByteBuffer.wrap( - collectorPath.toString().getBytes(Charsets.UTF_8)); - return BBUploadHandle.from(byteBuffer); + ByteBuffer byteBuffer = ByteBuffer.wrap( + collectorPath.toString().getBytes(Charsets.UTF_8)); + return BBUploadHandle.from(byteBuffer); + }); } @Override - public PartHandle putPart(Path filePath, InputStream inputStream, - int partNumber, UploadHandle uploadId, long lengthInBytes) + public CompletableFuture putPart(UploadHandle uploadId, + int partNumber, Path filePath, + InputStream inputStream, + long lengthInBytes) throws IOException { checkPutArguments(filePath, inputStream, partNumber, uploadId, lengthInBytes); + return FutureIOSupport.eval(() -> innerPutPart(filePath, + inputStream, partNumber, uploadId, lengthInBytes)); + } + + private PartHandle 
innerPutPart(Path filePath, + InputStream inputStream, + int partNumber, + UploadHandle uploadId, + long lengthInBytes) + throws IOException { byte[] uploadIdByteArray = uploadId.toByteArray(); checkUploadId(uploadIdByteArray); Path collectorPath = new Path(new String(uploadIdByteArray, 0, uploadIdByteArray.length, Charsets.UTF_8)); Path partPath = mergePaths(collectorPath, mergePaths(new Path(Path.SEPARATOR), - new Path(Integer.toString(partNumber) + ".part"))); - try(FSDataOutputStream fsDataOutputStream = - fs.createFile(partPath).build()) { - IOUtils.copy(inputStream, fsDataOutputStream, 4096); + new Path(partNumber + ".part"))); + final FSDataOutputStreamBuilder fileBuilder = fs.createFile(partPath); + if (checksumOpt != null) { + fileBuilder.checksumOpt(checksumOpt); + } + if (permission != null) { + fileBuilder.permission(permission); + } + try (FSDataOutputStream fsDataOutputStream = + fileBuilder.blockSize(blockSize).build()) { + IOUtils.copy(inputStream, fsDataOutputStream, + this.builder.getBufferSize()); } finally { cleanupWithLogger(LOG, inputStream); } @@ -106,16 +166,36 @@ private PathHandle getPathHandle(Path filePath) throws IOException { private long totalPartsLen(List partHandles) throws IOException { long totalLen = 0; - for (Path p: partHandles) { + for (Path p : partHandles) { totalLen += fs.getFileStatus(p).getLen(); } return totalLen; } @Override - @SuppressWarnings("deprecation") // rename w/ OVERWRITE - public PathHandle complete(Path filePath, Map handleMap, - UploadHandle multipartUploadId) throws IOException { + public CompletableFuture complete( + UploadHandle uploadId, + Path filePath, + Map handleMap) throws IOException { + + checkPath(filePath); + return FutureIOSupport.eval(() -> + innerComplete(uploadId, filePath, handleMap)); + } + + /** + * The upload complete operation. + * @param multipartUploadId the ID of the upload + * @param filePath path + * @param handleMap map of handles + * @return the path handle + * @throws IOException failure + */ + private PathHandle innerComplete( + UploadHandle multipartUploadId, Path filePath, + Map handleMap) throws IOException { + + checkPath(filePath); checkUploadId(multipartUploadId.toByteArray()); @@ -133,6 +213,13 @@ public PathHandle complete(Path filePath, Map handleMap, }) .collect(Collectors.toList()); + int count = partHandles.size(); + // built up to identify duplicates -if the size of this set is + // below that of the number of parts, then there's a duplicate entry. 
+ Set values = new HashSet<>(count); + values.addAll(partHandles); + Preconditions.checkArgument(values.size() == count, + "Duplicate PartHandles"); byte[] uploadIdByteArray = multipartUploadId.toByteArray(); Path collectorPath = new Path(new String(uploadIdByteArray, 0, uploadIdByteArray.length, Charsets.UTF_8)); @@ -146,35 +233,30 @@ public PathHandle complete(Path filePath, Map handleMap, fs.create(filePathInsideCollector).close(); fs.concat(filePathInsideCollector, partHandles.toArray(new Path[handles.size()])); - fs.rename(filePathInsideCollector, filePath, Options.Rename.OVERWRITE); + new InternalOperations() + .rename(fs, filePathInsideCollector, filePath, + Options.Rename.OVERWRITE); } fs.delete(collectorPath, true); return getPathHandle(filePath); } @Override - public void abort(Path filePath, UploadHandle uploadId) throws IOException { + public CompletableFuture abort(UploadHandle uploadId, + Path filePath) + throws IOException { + checkPath(filePath); byte[] uploadIdByteArray = uploadId.toByteArray(); checkUploadId(uploadIdByteArray); Path collectorPath = new Path(new String(uploadIdByteArray, 0, uploadIdByteArray.length, Charsets.UTF_8)); - // force a check for a file existing; raises FNFE if not found - fs.getFileStatus(collectorPath); - fs.delete(collectorPath, true); - } - - /** - * Factory for creating MultipartUploaderFactory objects for file:// - * filesystems. - */ - public static class Factory extends MultipartUploaderFactory { - protected MultipartUploader createMultipartUploader(FileSystem fs, - Configuration conf) { - if (fs.getScheme().equals("file")) { - return new FileSystemMultipartUploader(fs); - } + return FutureIOSupport.eval(() -> { + // force a check for a file existing; raises FNFE if not found + fs.getFileStatus(collectorPath); + fs.delete(collectorPath, true); return null; - } + }); } + } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FileSystemMultipartUploaderBuilder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FileSystemMultipartUploaderBuilder.java new file mode 100644 index 0000000000000..7c4d995c69d1b --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FileSystemMultipartUploaderBuilder.java @@ -0,0 +1,90 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.impl; + +import javax.annotation.Nonnull; +import java.io.IOException; +import java.util.EnumSet; + +import org.apache.hadoop.fs.CreateFlag; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Options; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; + +/** + * Builder for {@link FileSystemMultipartUploader}. 
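The duplicate check in innerComplete() above relies on set semantics rather than sorting: copying the part list into a Set and comparing sizes exposes any duplicate handle in O(n). A minimal self-contained sketch of the idiom, with plain strings standing in for part handles:

```java
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public final class DuplicatePartCheckDemo {

  static void requireNoDuplicates(List<String> parts) {
    // any duplicate entry shrinks the set below the list size
    Set<String> unique = new HashSet<>(parts);
    if (unique.size() != parts.size()) {
      throw new IllegalArgumentException("Duplicate PartHandles");
    }
  }

  public static void main(String[] args) {
    requireNoDuplicates(Arrays.asList("1.part", "2.part")); // fine
    requireNoDuplicates(Arrays.asList("1.part", "1.part")); // throws
  }
}
```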
+ */ +public class FileSystemMultipartUploaderBuilder extends + MultipartUploaderBuilderImpl { + + public FileSystemMultipartUploaderBuilder( + @Nonnull final FileSystem fileSystem, + @Nonnull final Path path) { + super(fileSystem, path); + } + + @Override + public FileSystemMultipartUploaderBuilder getThisBuilder() { + return this; + } + + @Override + public FileSystemMultipartUploader build() + throws IllegalArgumentException, IOException { + return new FileSystemMultipartUploader(this, getFS()); + } + + @Override + public FileSystem getFS() { + return super.getFS(); + } + + @Override + public FsPermission getPermission() { + return super.getPermission(); + } + + @Override + public int getBufferSize() { + return super.getBufferSize(); + } + + @Override + public short getReplication() { + return super.getReplication(); + } + + @Override + public EnumSet getFlags() { + return super.getFlags(); + } + + @Override + public Options.ChecksumOpt getChecksumOpt() { + return super.getChecksumOpt(); + } + + @Override + protected long getBlockSize() { + return super.getBlockSize(); + } + + +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FutureIOSupport.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FutureIOSupport.java index 26856e5b935e0..f13d701803d7e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FutureIOSupport.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FutureIOSupport.java @@ -21,6 +21,7 @@ import java.io.IOException; import java.io.InterruptedIOException; import java.util.Map; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionException; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; @@ -52,7 +53,7 @@ private FutureIOSupport() { * @throws IOException if something went wrong * @throws RuntimeException any nested RTE thrown */ - public static T awaitFuture(final Future future) + public static T awaitFuture(final Future future) throws InterruptedIOException, IOException, RuntimeException { try { return future.get(); @@ -224,4 +225,29 @@ public static void propagateOptions( } } } + + /** + * Evaluate a CallableRaisingIOE in the current thread, + * converting IOEs to RTEs and propagating. + * @param callable callable to invoke + * @param Return type. + * @return the evaluated result. 
+ * @throws UnsupportedOperationException fail fast if unsupported + * @throws IllegalArgumentException invalid argument + */ + public static CompletableFuture eval( + FunctionsRaisingIOE.CallableRaisingIOE callable) { + CompletableFuture result = new CompletableFuture<>(); + try { + result.complete(callable.apply()); + } catch (UnsupportedOperationException | IllegalArgumentException tx) { + // fail fast here + throw tx; + } catch (Throwable tx) { + // fail lazily here to ensure callers expect all File IO operations to + // surface later + result.completeExceptionally(tx); + } + return result; + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/MultipartUploaderBuilderImpl.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/MultipartUploaderBuilderImpl.java new file mode 100644 index 0000000000000..6c3336e6882b3 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/MultipartUploaderBuilderImpl.java @@ -0,0 +1,215 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.impl; + +import javax.annotation.Nonnull; +import java.io.IOException; +import java.util.EnumSet; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.CreateFlag; +import org.apache.hadoop.fs.FileContext; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FsServerDefaults; +import org.apache.hadoop.fs.MultipartUploader; +import org.apache.hadoop.fs.MultipartUploaderBuilder; +import org.apache.hadoop.fs.Options.ChecksumOpt; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; + +import static com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY; + +/** + * Builder for {@link MultipartUploader} implementations. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public abstract class MultipartUploaderBuilderImpl + > + extends AbstractFSBuilderImpl + implements MultipartUploaderBuilder { + + private final FileSystem fs; + + private FsPermission permission; + + private int bufferSize; + + private short replication; + + private long blockSize; + + private final EnumSet flags = EnumSet.noneOf(CreateFlag.class); + + private ChecksumOpt checksumOpt; + + /** + * Return the concrete implementation of the builder instance. + */ + public abstract B getThisBuilder(); + + /** + * Construct from a {@link FileContext}. + * + * @param fc FileContext + * @param p path. 
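The eval() helper above splits failures into two classes: argument and capability errors surface immediately, while everything else is deferred into the returned future. A simplified sketch of the same contract, using java.util.concurrent.Callable instead of the patch's CallableRaisingIOE:

```java
import java.util.concurrent.Callable;
import java.util.concurrent.CompletableFuture;

public final class EvalDemo {

  static <T> CompletableFuture<T> eval(Callable<T> callable) {
    CompletableFuture<T> result = new CompletableFuture<>();
    try {
      result.complete(callable.call());
    } catch (UnsupportedOperationException | IllegalArgumentException tx) {
      // fail fast: the caller sees these immediately
      throw tx;
    } catch (Throwable tx) {
      // fail lazily: the error surfaces when the future is consumed
      result.completeExceptionally(tx);
    }
    return result;
  }

  public static void main(String[] args) {
    System.out.println(eval(() -> 42).join()); // prints 42

    CompletableFuture<Integer> failing = eval(() -> {
      throw new java.io.IOException("disk gone");
    });
    System.out.println(failing.isCompletedExceptionally()); // true
  }
}
```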
+ * @throws IOException failure + */ + protected MultipartUploaderBuilderImpl(@Nonnull FileContext fc, + @Nonnull Path p) throws IOException { + super(checkNotNull(p)); + checkNotNull(fc); + this.fs = null; + + FsServerDefaults defaults = fc.getServerDefaults(p); + bufferSize = defaults.getFileBufferSize(); + replication = defaults.getReplication(); + blockSize = defaults.getBlockSize(); + } + + /** + * Constructor. + */ + protected MultipartUploaderBuilderImpl(@Nonnull FileSystem fileSystem, + @Nonnull Path p) { + super(fileSystem.makeQualified(checkNotNull(p))); + checkNotNull(fileSystem); + fs = fileSystem; + bufferSize = fs.getConf().getInt(IO_FILE_BUFFER_SIZE_KEY, + IO_FILE_BUFFER_SIZE_DEFAULT); + replication = fs.getDefaultReplication(p); + blockSize = fs.getDefaultBlockSize(p); + } + + protected FileSystem getFS() { + checkNotNull(fs); + return fs; + } + + protected FsPermission getPermission() { + if (permission == null) { + permission = FsPermission.getFileDefault(); + } + return permission; + } + + /** + * Set permission for the file. + */ + @Override + public B permission(@Nonnull final FsPermission perm) { + checkNotNull(perm); + permission = perm; + return getThisBuilder(); + } + + protected int getBufferSize() { + return bufferSize; + } + + /** + * Set the size of the buffer to be used. + */ + @Override + public B bufferSize(int bufSize) { + bufferSize = bufSize; + return getThisBuilder(); + } + + protected short getReplication() { + return replication; + } + + /** + * Set replication factor. + */ + @Override + public B replication(short replica) { + replication = replica; + return getThisBuilder(); + } + + protected long getBlockSize() { + return blockSize; + } + + /** + * Set block size. + */ + @Override + public B blockSize(long blkSize) { + blockSize = blkSize; + return getThisBuilder(); + } + + protected EnumSet getFlags() { + return flags; + } + + /** + * Create an FSDataOutputStream at the specified path. + */ + @Override + public B create() { + flags.add(CreateFlag.CREATE); + return getThisBuilder(); + } + + /** + * Set to true to overwrite the existing file. + * Set it to false, an exception will be thrown when calling {@link #build()} + * if the file exists. + */ + @Override + public B overwrite(boolean overwrite) { + if (overwrite) { + flags.add(CreateFlag.OVERWRITE); + } else { + flags.remove(CreateFlag.OVERWRITE); + } + return getThisBuilder(); + } + + /** + * Append to an existing file (optional operation). + */ + @Override + public B append() { + flags.add(CreateFlag.APPEND); + return getThisBuilder(); + } + + protected ChecksumOpt getChecksumOpt() { + return checksumOpt; + } + + /** + * Set checksum opt. 
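MultipartUploaderBuilderImpl keeps fluent chains typed to the concrete subclass through the recursive bound B extends MultipartUploaderBuilderImpl<S, B> plus getThisBuilder(). A minimal illustration of that pattern with invented names, not Hadoop code:

```java
// Every setter returns getThisBuilder(), statically typed as B, so a
// concrete subclass can chain its own setters without casts.
abstract class FluentBase<B extends FluentBase<B>> {
  protected int bufferSize;

  public abstract B getThisBuilder();

  public B bufferSize(int size) {
    this.bufferSize = size;
    return getThisBuilder(); // the concrete subclass type, not FluentBase
  }
}

class ConcreteBuilder extends FluentBase<ConcreteBuilder> {
  private boolean overwrite;

  @Override
  public ConcreteBuilder getThisBuilder() {
    return this;
  }

  public ConcreteBuilder overwrite(boolean flag) {
    this.overwrite = flag;
    return this;
  }
}

// usage: new ConcreteBuilder().bufferSize(4096).overwrite(true);
```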
+ */ + @Override + public B checksumOpt(@Nonnull final ChecksumOpt chksumOpt) { + checkNotNull(chksumOpt); + checksumOpt = chksumOpt; + return getThisBuilder(); + } + +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java index 3c9368ca2ed9b..184b674adcc27 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java @@ -20,6 +20,7 @@ import java.io.FileNotFoundException; import java.io.IOException; +import java.util.Collections; import java.util.LinkedList; import java.util.List; @@ -97,7 +98,7 @@ protected List expandArgument(String arg) throws IOException { throw e; } // prevent -f on a non-existent glob from failing - return new LinkedList(); + return Collections.emptyList(); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsUsage.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsUsage.java index 6596527738058..64aade3df9539 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsUsage.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsUsage.java @@ -128,7 +128,8 @@ private void addToUsagesTable(URI uri, FsStatus fsStatus, @Override protected void processPath(PathData item) throws IOException { - if (ViewFileSystemUtil.isViewFileSystem(item.fs)) { + if (ViewFileSystemUtil.isViewFileSystem(item.fs) + || ViewFileSystemUtil.isViewFileSystemOverloadScheme(item.fs)) { ViewFileSystem viewFileSystem = (ViewFileSystem) item.fs; Map fsStatusMap = ViewFileSystemUtil.getStatus(viewFileSystem, item.path); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java index 6dd1f6589478e..7d29b8f44ca62 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java @@ -66,8 +66,7 @@ public static void addLink(Configuration conf, final String mountTableName, */ public static void addLink(final Configuration conf, final String src, final URI target) { - addLink( conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, - src, target); + addLink(conf, getDefaultMountTableName(conf), src, target); } /** @@ -88,8 +87,7 @@ public static void addLinkMergeSlash(Configuration conf, * @param target */ public static void addLinkMergeSlash(Configuration conf, final URI target) { - addLinkMergeSlash(conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, - target); + addLinkMergeSlash(conf, getDefaultMountTableName(conf), target); } /** @@ -110,8 +108,7 @@ public static void addLinkFallback(Configuration conf, * @param target */ public static void addLinkFallback(Configuration conf, final URI target) { - addLinkFallback(conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, - target); + addLinkFallback(conf, getDefaultMountTableName(conf), target); } /** @@ -132,7 +129,7 @@ public static void addLinkMerge(Configuration conf, * @param targets */ public static void addLinkMerge(Configuration conf, final URI[] targets) { - addLinkMerge(conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, targets); + addLinkMerge(conf, 
getDefaultMountTableName(conf), targets); } /** @@ -166,8 +163,7 @@ public static void addLinkNfly(Configuration conf, String mountTableName, public static void addLinkNfly(final Configuration conf, final String src, final URI ... targets) { - addLinkNfly(conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, src, null, - targets); + addLinkNfly(conf, getDefaultMountTableName(conf), src, null, targets); } /** @@ -177,8 +173,7 @@ public static void addLinkNfly(final Configuration conf, final String src, */ public static void setHomeDirConf(final Configuration conf, final String homedir) { - setHomeDirConf( conf, - Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, homedir); + setHomeDirConf(conf, getDefaultMountTableName(conf), homedir); } /** @@ -202,7 +197,7 @@ public static void setHomeDirConf(final Configuration conf, * @return home dir value, null if variable is not in conf */ public static String getHomeDirValue(final Configuration conf) { - return getHomeDirValue(conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE); + return getHomeDirValue(conf, getDefaultMountTableName(conf)); } /** @@ -216,4 +211,18 @@ public static String getHomeDirValue(final Configuration conf, return conf.get(getConfigViewFsPrefix(mountTableName) + "." + Constants.CONFIG_VIEWFS_HOMEDIR); } + + /** + * Get the name of the default mount table to use. If + * {@link Constants#CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE_NAME_KEY} is specified, + * it's value is returned. Otherwise, + * {@link Constants#CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE} is returned. + * + * @param conf Configuration to use. + * @return the name of the default mount table to use. + */ + public static String getDefaultMountTableName(final Configuration conf) { + return conf.get(Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE_NAME_KEY, + Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE); + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java index 0a5d4b46ce2d8..492cb87ee024e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java @@ -41,12 +41,18 @@ public interface Constants { * then the hadoop default value (/user) is used. */ public static final String CONFIG_VIEWFS_HOMEDIR = "homedir"; - + + /** + * Config key to specify the name of the default mount table. + */ + String CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE_NAME_KEY = + "fs.viewfs.mounttable.default.name.key"; + /** * Config variable name for the default mount table. */ public static final String CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE = "default"; - + /** * Config variable full prefix for the default mount table. */ @@ -90,4 +96,25 @@ public interface Constants { String CONFIG_VIEWFS_ENABLE_INNER_CACHE = "fs.viewfs.enable.inner.cache"; boolean CONFIG_VIEWFS_ENABLE_INNER_CACHE_DEFAULT = true; + + /** + * Enable ViewFileSystem to show mountlinks as symlinks. + */ + String CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS = + "fs.viewfs.mount.links.as.symlinks"; + + boolean CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS_DEFAULT = true; + + /** + * When initializing the viewfs, authority will be used as the mount table + * name to find the mount link configurations. To make the mount table name + * unique, we may want to ignore port if initialized uri authority contains + * port number. 
By default, the port number is also considered in + * ViewFileSystem (this defaults to false, so that existing + * deployments continue with the current behavior). + */ + String CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME = + "fs.viewfs.ignore.port.in.mount.table.name"; + + boolean CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME_DEFAULT = false; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java index 50c839b52b654..422e7337b57fb 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java @@ -34,6 +34,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileAlreadyExistsException; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.UnsupportedFileSystemException; import org.apache.hadoop.security.UserGroupInformation; @@ -67,7 +68,7 @@ enum ResultKind { // the root of the mount table private final INode root; // the fallback filesystem - private final INodeLink rootFallbackLink; + private INodeLink rootFallbackLink; // the homedir for this mount table private final String homedirPrefix; private List> mountPoints = new ArrayList>(); @@ -374,7 +375,7 @@ protected abstract T getTargetFileSystem(URI uri) throws UnsupportedFileSystemException, URISyntaxException, IOException; protected abstract T getTargetFileSystem(INodeDir dir) - throws URISyntaxException; + throws URISyntaxException, IOException; protected abstract T getTargetFileSystem(String settings, URI[] mergeFsURIs) throws UnsupportedFileSystemException, URISyntaxException, IOException; @@ -393,7 +394,7 @@ private boolean hasFallbackLink() { return rootFallbackLink != null; } - private INodeLink getRootFallbackLink() { + protected INodeLink getRootFallbackLink() { Preconditions.checkState(root.isInternalDir()); return rootFallbackLink; } @@ -460,12 +461,13 @@ Configuration getConfig() { * @throws FileAlreadyExistsException * @throws IOException */ - protected InodeTree(final Configuration config, final String viewName) + protected InodeTree(final Configuration config, final String viewName, + final URI theUri, boolean initingUriAsFallbackOnNoMounts) throws UnsupportedFileSystemException, URISyntaxException, FileAlreadyExistsException, IOException { String mountTableName = viewName; if (mountTableName == null) { - mountTableName = Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE; + mountTableName = ConfigUtil.getDefaultMountTableName(config); } homedirPrefix = ConfigUtil.getHomeDirValue(config, mountTableName); @@ -596,9 +598,19 @@ protected InodeTree(final Configuration config, final String viewName) } if (!gotMountTableEntry) { - throw new IOException( - "ViewFs: Cannot initialize: Empty Mount table in config for " + - "viewfs://" + mountTableName + "/"); + if (!initingUriAsFallbackOnNoMounts) { + throw new IOException( + "ViewFs: Cannot initialize: Empty Mount table in config for " + + "viewfs://" + mountTableName + "/"); + } + StringBuilder msg = + new StringBuilder("Empty mount table detected for ").append(theUri) .append(" and considering itself as a linkFallback."); + FileSystem.LOG.info(msg.toString()); + rootFallbackLink = + new INodeLink(mountTableName, ugi, getTargetFileSystem(theUri), + 
theUri); + getRootDir().addFallbackLink(rootFallbackLink); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java index 4f02feeebec8b..1fc531e05635d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java @@ -20,6 +20,10 @@ import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs; import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_ENABLE_INNER_CACHE; import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_ENABLE_INNER_CACHE_DEFAULT; +import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME; +import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME_DEFAULT; +import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS; +import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS_DEFAULT; import static org.apache.hadoop.fs.viewfs.Constants.PERMISSION_555; import java.io.FileNotFoundException; @@ -39,6 +43,7 @@ import java.util.Objects; import java.util.Set; +import com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; @@ -253,6 +258,14 @@ public String getScheme() { return FsConstants.VIEWFS_SCHEME; } + /** + * Returns the ViewFileSystem type. + * @return viewfs + */ + String getType() { + return FsConstants.VIEWFS_TYPE; + } + /** * Called after a new FileSystem instance is constructed. * @param theUri a uri whose authority section names the host, port, etc. for @@ -271,9 +284,18 @@ public void initialize(final URI theUri, final Configuration conf) final InnerCache innerCache = new InnerCache(fsGetter); // Now build client side view (i.e. client side mount table) from config. final String authority = theUri.getAuthority(); + String tableName = authority; + if (theUri.getPort() != -1 && config + .getBoolean(CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME, + CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME_DEFAULT)) { + tableName = theUri.getHost(); + } try { myUri = new URI(getScheme(), authority, "/", null, null); - fsState = new InodeTree(conf, authority) { + boolean initingUriAsFallbackOnNoMounts = + !FsConstants.VIEWFS_TYPE.equals(getType()); + fsState = new InodeTree(conf, tableName, theUri, + initingUriAsFallbackOnNoMounts) { @Override protected FileSystem getTargetFileSystem(final URI uri) throws URISyntaxException, IOException { @@ -288,8 +310,9 @@ protected FileSystem getTargetFileSystem(final URI uri) @Override protected FileSystem getTargetFileSystem(final INodeDir dir) - throws URISyntaxException { - return new InternalDirOfViewFs(dir, creationTime, ugi, myUri, config); + throws URISyntaxException { + return new InternalDirOfViewFs(dir, creationTime, ugi, myUri, config, + this); } @Override @@ -488,6 +511,14 @@ private static FileStatus wrapLocalFileStatus(FileStatus orig, : new ViewFsFileStatus(orig, qualified); } + /** + * {@inheritDoc} + * + * If the given path is a symlink(mount link), the path will be resolved to a + * target path and it will get the resolved path's FileStatus object. 
It will + * not be represented as a symlink, and the isDirectory API returns true if the + * resolved path is a directory, false otherwise. + */ @Override public FileStatus getFileStatus(final Path f) throws AccessControlException, FileNotFoundException, IOException { @@ -505,6 +536,33 @@ public void access(Path path, FsAction mode) throws AccessControlException, res.targetFileSystem.access(res.remainingPath, mode); } + /** + * {@inheritDoc} + * + * Note: listStatus considers listing from the fallbackLink if available. If + * the same directory path is present in the configured mount path as well as + * in the fallback fs, then only the fallback path will be listed in the + * returned result, except for links. + * + * If any of the immediate children of the given path f is a symlink (mount + * link), the returned FileStatus object of that child will be represented + * as a symlink. It will not be resolved to the target path, and the target + * path's FileStatus object will not be fetched. The target path will be + * available via getSymlink on that child's FileStatus object. Since it is + * represented as a symlink, isDirectory on that child's FileStatus will + * return false. + * This behavior can be changed by setting the advanced configuration + * fs.viewfs.mount.links.as.symlinks to false. In this case, mount points will + * be represented as non-symlinks, and all the file/directory attributes like + * permissions and isDirectory will be assigned from its resolved target + * directory/file. + * + * If you want the FileStatus of the target path for such a child, use the + * getFileStatus API with that child's symlink path. Please see + * {@link ViewFileSystem#getFileStatus(Path f)} + * + * Note: In ViewFileSystem, by default the mount links are represented as + * symlinks. + */ @Override public FileStatus[] listStatus(final Path f) throws AccessControlException, FileNotFoundException, IOException { @@ -1087,11 +1145,14 @@ static class InternalDirOfViewFs extends FileSystem { final long creationTime; // of the the mount table final UserGroupInformation ugi; // the user/group of user who created mtable final URI myUri; + private final boolean showMountLinksAsSymlinks; + private InodeTree fsState; public InternalDirOfViewFs(final InodeTree.INodeDir dir, final long cTime, final UserGroupInformation ugi, URI uri, - Configuration config) throws URISyntaxException { + Configuration config, InodeTree fsState) throws URISyntaxException { myUri = uri; + this.fsState = fsState; try { initialize(myUri, config); } catch (IOException e) { @@ -1100,6 +1161,9 @@ public InternalDirOfViewFs(final InodeTree.INodeDir dir, theInternalDir = dir; creationTime = cTime; this.ugi = ugi; + showMountLinksAsSymlinks = config + .getBoolean(CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, + CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS_DEFAULT); } static private void checkPathIsSlash(final Path f) throws IOException { @@ -1136,7 +1200,41 @@ public FSDataOutputStream append(final Path f, final int bufferSize, public FSDataOutputStream create(final Path f, final FsPermission permission, final boolean overwrite, final int bufferSize, final short replication, final long blockSize, - final Progressable progress) throws AccessControlException { + final Progressable progress) throws IOException { + Preconditions.checkNotNull(f, "File cannot be null."); + if (InodeTree.SlashPath.equals(f)) { + throw new FileAlreadyExistsException( + "/ is not a file. 
The directory / already exists at: " + + theInternalDir.fullPath); + } + + if (this.fsState.getRootFallbackLink() != null) { + + if (theInternalDir.getChildren().containsKey(f.getName())) { + throw new FileAlreadyExistsException( + "A mount path(file/dir) already exists with the requested path: " + + theInternalDir.getChildren().get(f.getName()).fullPath); + } + + FileSystem linkedFallbackFs = + this.fsState.getRootFallbackLink().getTargetFileSystem(); + Path parent = Path.getPathWithoutSchemeAndAuthority( + new Path(theInternalDir.fullPath)); + String leaf = f.getName(); + Path fileToCreate = new Path(parent, leaf); + + try { + return linkedFallbackFs + .create(fileToCreate, permission, overwrite, bufferSize, + replication, blockSize, progress); + } catch (IOException e) { + StringBuilder msg = + new StringBuilder("Failed to create file:").append(fileToCreate) + .append(" at fallback : ").append(linkedFallbackFs.getUri()); + LOG.error(msg.toString(), e); + throw e; + } + } throw readOnlyMountTable("create", f); } @@ -1174,86 +1272,151 @@ public FileStatus getFileStatus(Path f) throws IOException { checkPathIsSlash(f); return new FileStatus(0, true, 0, 0, creationTime, creationTime, PERMISSION_555, ugi.getShortUserName(), ugi.getPrimaryGroupName(), - new Path(theInternalDir.fullPath).makeQualified( myUri, ROOT_PATH)); } - /** - * {@inheritDoc} - * - * Note: listStatus on root("/") considers listing from fallbackLink if - * available. If the same directory name is present in configured mount - * path as well as in fallback link, then only the configured mount path - * will be listed in the returned result. - */ @Override public FileStatus[] listStatus(Path f) throws AccessControlException, FileNotFoundException, IOException { checkPathIsSlash(f); FileStatus[] fallbackStatuses = listStatusForFallbackLink(); - FileStatus[] result = new FileStatus[theInternalDir.getChildren().size()]; + Set linkStatuses = new HashSet<>(); + Set internalDirStatuses = new HashSet<>(); int i = 0; for (Entry> iEntry : theInternalDir.getChildren().entrySet()) { INode inode = iEntry.getValue(); + Path path = new Path(inode.fullPath).makeQualified(myUri, null); if (inode.isLink()) { INodeLink link = (INodeLink) inode; - result[i++] = new FileStatus(0, false, 0, 0, - creationTime, creationTime, PERMISSION_555, - ugi.getShortUserName(), ugi.getPrimaryGroupName(), - link.getTargetLink(), - new Path(inode.fullPath).makeQualified( - myUri, null)); + if (showMountLinksAsSymlinks) { + // To maintain backward compatibility with the default option (showing + // mount links as symlinks), we represent the target link as a + // symlink; the rest of the properties belong to the mount link only. + linkStatuses.add( + new FileStatus(0, false, 0, 0, creationTime, creationTime, + PERMISSION_555, ugi.getShortUserName(), + ugi.getPrimaryGroupName(), link.getTargetLink(), path)); + continue; + } + + // We represent the mount link as a non-symlink, showing the target + // directory/file properties, like permissions and isDirectory, on the + // mount path. The path will be a mount link path, and isDirectory is + // true if the target is a dir, otherwise false. 
+ String linkedPath = link.getTargetFileSystem().getUri().getPath(); + if ("".equals(linkedPath)) { + linkedPath = "/"; + } + try { + FileStatus status = + ((ChRootedFileSystem)link.getTargetFileSystem()) + .getMyFs().getFileStatus(new Path(linkedPath)); + linkStatuses.add( + new FileStatus(status.getLen(), status.isDirectory(), + status.getReplication(), status.getBlockSize(), + status.getModificationTime(), status.getAccessTime(), + status.getPermission(), status.getOwner(), + status.getGroup(), null, path)); + } catch (FileNotFoundException ex) { + LOG.warn("Cannot get one of the children's(" + path + + ") target path(" + link.getTargetFileSystem().getUri() + + ") file status.", ex); + throw ex; + } } else { - result[i++] = new FileStatus(0, true, 0, 0, - creationTime, creationTime, PERMISSION_555, - ugi.getShortUserName(), ugi.getGroupNames()[0], - new Path(inode.fullPath).makeQualified( - myUri, null)); + internalDirStatuses.add( + new FileStatus(0, true, 0, 0, creationTime, creationTime, + PERMISSION_555, ugi.getShortUserName(), + ugi.getPrimaryGroupName(), path)); } } + FileStatus[] internalDirStatusesMergedWithFallBack = internalDirStatuses .toArray(new FileStatus[internalDirStatuses.size()]); if (fallbackStatuses.length > 0) { - return consolidateFileStatuses(fallbackStatuses, result); - } else { - return result; + internalDirStatusesMergedWithFallBack = + merge(fallbackStatuses, internalDirStatusesMergedWithFallBack); } + // Links always take precedence over internalDir or fallback paths. + return merge(linkStatuses.toArray(new FileStatus[linkStatuses.size()]), + internalDirStatusesMergedWithFallBack); } - private FileStatus[] consolidateFileStatuses(FileStatus[] fallbackStatuses, - FileStatus[] mountPointStatuses) { + private FileStatus[] merge(FileStatus[] toStatuses, + FileStatus[] fromStatuses) { ArrayList result = new ArrayList<>(); Set pathSet = new HashSet<>(); - for (FileStatus status : mountPointStatuses) { + for (FileStatus status : toStatuses) { result.add(status); pathSet.add(status.getPath().getName()); } - for (FileStatus status : fallbackStatuses) { + for (FileStatus status : fromStatuses) { if (!pathSet.contains(status.getPath().getName())) { result.add(status); } } - return result.toArray(new FileStatus[0]); + return result.toArray(new FileStatus[result.size()]); } private FileStatus[] listStatusForFallbackLink() throws IOException { - if (theInternalDir.isRoot() && - theInternalDir.getFallbackLink() != null) { - FileSystem linkedFs = - theInternalDir.getFallbackLink().getTargetFileSystem(); - // Fallback link is only applicable for root - FileStatus[] statuses = linkedFs.listStatus(new Path("/")); - for (FileStatus status : statuses) { - // Fix the path back to viewfs scheme - status.setPath( - new Path(myUri.toString(), status.getPath().getName())); + if (this.fsState.getRootFallbackLink() != null) { + FileSystem linkedFallbackFs = + this.fsState.getRootFallbackLink().getTargetFileSystem(); + Path p = Path.getPathWithoutSchemeAndAuthority( + new Path(theInternalDir.fullPath)); + if (theInternalDir.isRoot() || linkedFallbackFs.exists(p)) { + FileStatus[] statuses = linkedFallbackFs.listStatus(p); + for (FileStatus status : statuses) { + // Fix the path back to viewfs scheme + Path pathFromConfiguredFallbackRoot = + new Path(p, status.getPath().getName()); + status.setPath( + new Path(myUri.toString(), pathFromConfiguredFallbackRoot)); + } + return statuses; } - return statuses; - } else { - return new FileStatus[0]; } + return new FileStatus[0]; + } + + 
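To make the listStatus contract documented above concrete, here is a minimal client-side sketch. It is illustrative only: the mount table name "cluster", the target hdfs://nn1/data URI, and the assumption of a reachable HDFS cluster are all hypothetical; the config keys follow the fs.viewfs.mounttable.<name>.link.<src> pattern used elsewhere in this patch.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListStatusSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical mount link: /data in viewfs://cluster/ -> hdfs://nn1/data.
        conf.set("fs.viewfs.mounttable.cluster.link./data", "hdfs://nn1/data");
        // Default is true (mount links listed as symlinks); set false to surface
        // the resolved target's attributes (permissions, isDirectory, ...) instead.
        conf.setBoolean("fs.viewfs.mount.links.as.symlinks", false);
        FileSystem viewFs = FileSystem.get(URI.create("viewfs://cluster/"), conf);
        for (FileStatus st : viewFs.listStatus(new Path("/"))) {
          System.out.println(st.getPath() + " isDir=" + st.isDirectory()
              + " isSymlink=" + st.isSymlink());
        }
      }
    }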
@Override + public ContentSummary getContentSummary(Path f) throws IOException { + long[] summary = {0, 0, 1}; + for (FileStatus status : listStatus(f)) { + Path targetPath = + Path.getPathWithoutSchemeAndAuthority(status.getPath()); + InodeTree.ResolveResult res = + fsState.resolve(targetPath.toString(), true); + ContentSummary child = + res.targetFileSystem.getContentSummary(res.remainingPath); + summary[0] += child.getLength(); + summary[1] += child.getFileCount(); + summary[2] += child.getDirectoryCount(); + } + return new ContentSummary.Builder() + .length(summary[0]) + .fileCount(summary[1]) + .directoryCount(summary[2]) + .build(); + } + + @Override + public FsStatus getStatus(Path p) throws IOException { + long[] summary = {0, 0, 0}; + for (FileStatus status : listStatus(p)) { + Path targetPath = + Path.getPathWithoutSchemeAndAuthority(status.getPath()); + InodeTree.ResolveResult res = + fsState.resolve(targetPath.toString(), true); + FsStatus child = res.targetFileSystem.getStatus(res.remainingPath); + summary[0] += child.getCapacity(); + summary[1] += child.getUsed(); + summary[2] += child.getRemaining(); + } + return new FsStatus(summary[0], summary[1], summary[2]); } @Override @@ -1267,6 +1430,31 @@ public boolean mkdirs(Path dir, FsPermission permission) dir.toString().substring(1))) { return true; // this is the stupid semantics of FileSystem } + + if (this.fsState.getRootFallbackLink() != null) { + FileSystem linkedFallbackFs = + this.fsState.getRootFallbackLink().getTargetFileSystem(); + Path parent = Path.getPathWithoutSchemeAndAuthority( + new Path(theInternalDir.fullPath)); + String leafChild = (InodeTree.SlashPath.equals(dir)) ? + InodeTree.SlashPath.toString() : + dir.getName(); + Path dirToCreate = new Path(parent, leafChild); + + try { + return linkedFallbackFs.mkdirs(dirToCreate, permission); + } catch (IOException e) { + if (LOG.isDebugEnabled()) { + StringBuilder msg = + new StringBuilder("Failed to create ").append(dirToCreate) + .append(" at fallback : ") + .append(linkedFallbackFs.getUri()); + LOG.debug(msg.toString(), e); + } + return false; + } + } + throw readOnlyMountTable("mkdirs", dir); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemOverloadScheme.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemOverloadScheme.java index 36f9cd104cb6b..2165a3f9ee688 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemOverloadScheme.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemOverloadScheme.java @@ -31,6 +31,8 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.UnsupportedFileSystemException; +import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME; + /****************************************************************************** * This class is extended from the ViewFileSystem for the overloaded scheme * file system. 
Mount link configurations and in-memory mount table @@ -59,9 +61,9 @@ * data to mount with other hdfs and object store clusters(hdfs://NN1, * o3fs://bucket1.volume1/, s3a://bucket1/) * - * fs.viewfs.mounttable.Cluster./user = hdfs://NN1/user - * fs.viewfs.mounttable.Cluster./data = o3fs://bucket1.volume1/data - * fs.viewfs.mounttable.Cluster./backup = s3a://bucket1/backup/ + * fs.viewfs.mounttable.Cluster.link./user = hdfs://NN1/user + * fs.viewfs.mounttable.Cluster.link./data = o3fs://bucket1.volume1/data + * fs.viewfs.mounttable.Cluster.link./backup = s3a://bucket1/backup/ * * Op1: Create file hdfs://Cluster/user/fileA will go to hdfs://NN1/user/fileA * Op2: Create file hdfs://Cluster/data/datafile will go to @@ -75,15 +77,28 @@ * data to mount with other hdfs and object store clusters * (hdfs://NN1, o3fs://bucket1.volume1/) * - * fs.viewfs.mounttable.bucketA./user = hdfs://NN1/user - * fs.viewfs.mounttable.bucketA./data = o3fs://bucket1.volume1/data - * fs.viewfs.mounttable.bucketA./salesDB = s3a://bucketA/salesDB/ + * fs.viewfs.mounttable.bucketA.link./user = hdfs://NN1/user + * fs.viewfs.mounttable.bucketA.link./data = o3fs://bucket1.volume1/data + * fs.viewfs.mounttable.bucketA.link./salesDB = s3a://bucketA/salesDB/ * * Op1: Create file s3a://bucketA/user/fileA will go to hdfs://NN1/user/fileA * Op2: Create file s3a://bucketA/data/datafile will go to * o3fs://bucket1.volume1/data/datafile * Op3: Create file s3a://bucketA/salesDB/dbfile will go to * s3a://bucketA/salesDB/dbfile + * + * Note: + * (1) In ViewFileSystemOverloadScheme, by default the mount links will be + * represented as non-symlinks. If you want to change this behavior, please see + * {@link ViewFileSystem#listStatus(Path)} + * (2) In ViewFileSystemOverloadScheme, only the initialized uri's hostname is + * considered as the mount table name. If the passed uri has hostname:port, the + * port number is simply ignored and only the hostname is used as the mount + * table name. + * (3) If there are no mount links configured with the initializing uri's + * hostname as the mount table name, then the initializing uri itself is + * automatically considered as the fallback( ex: fs.viewfs.mounttable..linkFallBack) + * target fs uri. *****************************************************************************/ @InterfaceAudience.LimitedPrivate({ "MapReduce", "HBase", "Hive" }) @InterfaceStability.Evolving @@ -98,6 +113,14 @@ public String getScheme() { return myUri.getScheme(); } + /** + * Returns the ViewFileSystem type. 
+ * @return viewfs + */ + String getType() { + return FsConstants.VIEWFSOS_TYPE; + } + @Override public void initialize(URI theUri, Configuration conf) throws IOException { this.myUri = theUri; @@ -107,6 +130,14 @@ public void initialize(URI theUri, Configuration conf) throws IOException { } String mountTableConfigPath = conf.get(Constants.CONFIG_VIEWFS_MOUNTTABLE_PATH); + /* The default value is false in ViewFSOverloadScheme. */ + conf.setBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, + conf.getBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, + false)); + /* The default value is true in ViewFSOverloadScheme. */ + conf.setBoolean(CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME, + conf.getBoolean(Constants.CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME, + true)); if (null != mountTableConfigPath) { MountTableConfigLoader loader = new HCFSMountTableConfigLoader(); loader.load(mountTableConfigPath, conf); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemUtil.java index c8a1d78cffd46..f486a10b4c8f9 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemUtil.java @@ -51,6 +51,17 @@ public static boolean isViewFileSystem(final FileSystem fileSystem) { return fileSystem.getScheme().equals(FsConstants.VIEWFS_SCHEME); } + /** + * Check if the FileSystem is a ViewFileSystemOverloadScheme. + * + * @param fileSystem file system to check + * @return true if the fileSystem is a ViewFileSystemOverloadScheme + */ + public static boolean isViewFileSystemOverloadScheme( + final FileSystem fileSystem) { + return fileSystem instanceof ViewFileSystemOverloadScheme; + } + /** * Get FsStatus for all ViewFsMountPoints matching path for the given * ViewFileSystem. 
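As a usage note for the overload-scheme defaults set in initialize() above, the following is a hedged sketch of wiring an hdfs:// URI through ViewFileSystemOverloadScheme. The impl keys are assumed to follow the fs.<scheme>.impl and fs.viewfs.overload.scheme.target.<scheme>.impl pattern, and NN1/Cluster are the hypothetical names from the class javadoc example.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class OverloadSchemeSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Route the hdfs scheme through the overload-scheme view.
        conf.set("fs.hdfs.impl",
            "org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme");
        conf.set("fs.viewfs.overload.scheme.target.hdfs.impl",
            "org.apache.hadoop.hdfs.DistributedFileSystem");
        // Mount table named after the authority "Cluster"; any port in the
        // initializing uri is ignored because the ignore-port default is
        // flipped to true in initialize() above.
        conf.set("fs.viewfs.mounttable.Cluster.link./user", "hdfs://NN1/user");
        FileSystem fs = FileSystem.get(URI.create("hdfs://Cluster/"), conf);
        // hdfs://Cluster/user/fileA resolves to hdfs://NN1/user/fileA.
        fs.create(new Path("/user/fileA")).close();
      }
    }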
@@ -93,7 +104,8 @@ public static boolean isViewFileSystem(final FileSystem fileSystem) { */ public static Map getStatus( FileSystem fileSystem, Path path) throws IOException { - if (!isViewFileSystem(fileSystem)) { + if (!(isViewFileSystem(fileSystem) + || isViewFileSystemOverloadScheme(fileSystem))) { throw new UnsupportedFileSystemException("FileSystem '" + fileSystem.getUri() + "'is not a ViewFileSystem."); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java index 607bdb8d423a0..95b596bde367d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.fs.viewfs; +import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS; +import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS_DEFAULT; import static org.apache.hadoop.fs.viewfs.Constants.PERMISSION_555; import java.io.FileNotFoundException; @@ -31,6 +33,8 @@ import java.util.Map.Entry; import java.util.Set; + +import com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; @@ -42,6 +46,7 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileChecksum; +import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FsConstants; import org.apache.hadoop.fs.FsServerDefaults; @@ -67,7 +72,8 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.Progressable; import org.apache.hadoop.util.Time; - +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * ViewFs (extends the AbstractFileSystem interface) implements a client-side @@ -154,6 +160,7 @@ @InterfaceAudience.Public @InterfaceStability.Evolving /*Evolving for a release,to be changed to Stable */ public class ViewFs extends AbstractFileSystem { + static final Logger LOG = LoggerFactory.getLogger(ViewFs.class); final long creationTime; // of the the mount table final UserGroupInformation ugi; // the user/group of user who created mtable final Configuration config; @@ -161,6 +168,7 @@ public class ViewFs extends AbstractFileSystem { Path homeDir = null; private ViewFileSystem.RenameStrategy renameStrategy = ViewFileSystem.RenameStrategy.SAME_MOUNTPOINT; + private static boolean showMountLinksAsSymlinks = true; static AccessControlException readOnlyMountTable(final String operation, final String p) { @@ -188,7 +196,16 @@ URI[] getTargets() { return targets; } } - + + /** + * Returns the ViewFileSystem type. + * + * @return viewfs + */ + String getType() { + return FsConstants.VIEWFS_TYPE; + } + public ViewFs(final Configuration conf) throws IOException, URISyntaxException { this(FsConstants.VIEWFS_URI, conf); @@ -209,9 +226,15 @@ public ViewFs(final Configuration conf) throws IOException, creationTime = Time.now(); ugi = UserGroupInformation.getCurrentUser(); config = conf; + showMountLinksAsSymlinks = config + .getBoolean(CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, + CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS_DEFAULT); // Now build client side view (i.e. client side mount table) from config. 
String authority = theUri.getAuthority(); - fsState = new InodeTree(conf, authority) { + boolean initingUriAsFallbackOnNoMounts = + !FsConstants.VIEWFS_TYPE.equals(getType()); + fsState = new InodeTree(conf, authority, theUri, + initingUriAsFallbackOnNoMounts) { @Override protected AbstractFileSystem getTargetFileSystem(final URI uri) @@ -228,7 +251,8 @@ protected AbstractFileSystem getTargetFileSystem(final URI uri) @Override protected AbstractFileSystem getTargetFileSystem( final INodeDir dir) throws URISyntaxException { - return new InternalDirOfViewFs(dir, creationTime, ugi, getUri()); + return new InternalDirOfViewFs(dir, creationTime, ugi, getUri(), this, + config); } @Override @@ -351,6 +375,14 @@ public FileChecksum getFileChecksum(final Path f) return res.targetFileSystem.getFileChecksum(res.remainingPath); } + /** + * {@inheritDoc} + * + * If the given path is a symlink(mount link), the path will be resolved to a + * target path and it will get the resolved path's FileStatus object. It will + * not be represented as a symlink, and the isDirectory API returns true if the + * resolved path is a directory, false otherwise. + */ @Override public FileStatus getFileStatus(final Path f) throws AccessControlException, FileNotFoundException, UnresolvedLinkException, IOException { @@ -436,6 +468,32 @@ public LocatedFileStatus getViewFsFileStatus(LocatedFileStatus stat, }; } + /** + * {@inheritDoc} + * + * Note: listStatus considers listing from the fallbackLink if available. If + * the same directory path is present in the configured mount path as well as + * in the fallback fs, then only the fallback path will be listed in the + * returned result, except for links. + * + * If any of the immediate children of the given path f is a symlink (mount + * link), the returned FileStatus object of that child will be represented + * as a symlink. It will not be resolved to the target path, and the target + * path's FileStatus object will not be fetched. The target path will be + * available via getSymlink on that child's FileStatus object. Since it is + * represented as a symlink, isDirectory on that child's FileStatus will + * return false. + * This behavior can be changed by setting the advanced configuration + * fs.viewfs.mount.links.as.symlinks to false. In this case, mount points will + * be represented as non-symlinks, and all the file/directory attributes like + * permissions and isDirectory will be assigned from its resolved target + * directory/file. + * + * If you want the FileStatus of the target path for such a child, use the + * getFileStatus API with that child's symlink path. Please see + * {@link ViewFs#getFileStatus(Path f)} + * + * Note: In ViewFs, by default the mount links are represented as symlinks. 
+ */ @Override public FileStatus[] listStatus(final Path f) throws AccessControlException, FileNotFoundException, UnresolvedLinkException, IOException { @@ -843,15 +901,20 @@ static class InternalDirOfViewFs extends AbstractFileSystem { final long creationTime; // of the the mount table final UserGroupInformation ugi; // the user/group of user who created mtable final URI myUri; // the URI of the outer ViewFs - + private InodeTree fsState; + private Configuration conf; + public InternalDirOfViewFs(final InodeTree.INodeDir dir, - final long cTime, final UserGroupInformation ugi, final URI uri) + final long cTime, final UserGroupInformation ugi, final URI uri, + InodeTree fsState, Configuration conf) throws URISyntaxException { super(FsConstants.VIEWFS_URI, FsConstants.VIEWFS_SCHEME, false, -1); theInternalDir = dir; creationTime = cTime; this.ugi = ugi; myUri = uri; + this.fsState = fsState; + this.conf = conf; } static private void checkPathIsSlash(final Path f) throws IOException { @@ -870,6 +933,41 @@ public FSDataOutputStream createInternal(final Path f, FileAlreadyExistsException, FileNotFoundException, ParentNotDirectoryException, UnsupportedFileSystemException, UnresolvedLinkException, IOException { + Preconditions.checkNotNull(f, "File cannot be null."); + if (InodeTree.SlashPath.equals(f)) { + throw new FileAlreadyExistsException( + "/ is not a file. The directory / already exists at: " + + theInternalDir.fullPath); + } + + if (this.fsState.getRootFallbackLink() != null) { + if (theInternalDir.getChildren().containsKey(f.getName())) { + throw new FileAlreadyExistsException( + "A mount path(file/dir) already exists with the requested path: " + + theInternalDir.getChildren().get(f.getName()).fullPath); + } + + AbstractFileSystem linkedFallbackFs = + this.fsState.getRootFallbackLink().getTargetFileSystem(); + Path parent = Path.getPathWithoutSchemeAndAuthority( + new Path(theInternalDir.fullPath)); + String leaf = f.getName(); + Path fileToCreate = new Path(parent, leaf); + + try { + return linkedFallbackFs + .createInternal(fileToCreate, flag, absolutePermission, + bufferSize, replication, blockSize, progress, checksumOpt, + true); + } catch (IOException e) { + StringBuilder msg = + new StringBuilder("Failed to create file:").append(fileToCreate) + .append(" at fallback : ").append(linkedFallbackFs.getUri()); + LOG.error(msg.toString(), e); + throw e; + } + } + throw readOnlyMountTable("create", f); } @@ -917,11 +1015,25 @@ public FileStatus getFileLinkStatus(final Path f) if (inode.isLink()) { INodeLink inodelink = (INodeLink) inode; - result = new FileStatus(0, false, 0, 0, creationTime, creationTime, + try { + String linkedPath = inodelink.getTargetFileSystem() + .getUri().getPath(); + FileStatus status = ((ChRootedFs)inodelink.getTargetFileSystem()) + .getMyFs().getFileStatus(new Path(linkedPath)); + result = new FileStatus(status.getLen(), false, + status.getReplication(), status.getBlockSize(), + status.getModificationTime(), status.getAccessTime(), + status.getPermission(), status.getOwner(), status.getGroup(), + inodelink.getTargetLink(), + new Path(inode.fullPath).makeQualified( + myUri, null)); + } catch (FileNotFoundException ex) { + result = new FileStatus(0, false, 0, 0, creationTime, creationTime, PERMISSION_555, ugi.getShortUserName(), ugi.getPrimaryGroupName(), inodelink.getTargetLink(), new Path(inode.fullPath).makeQualified( myUri, null)); + } } else { result = new FileStatus(0, true, 0, 0, creationTime, creationTime, PERMISSION_555, ugi.getShortUserName(), 
ugi.getPrimaryGroupName(), @@ -961,83 +1073,145 @@ public int getUriDefaultPort() { * will be listed in the returned result. */ @Override - public FileStatus[] listStatus(final Path f) throws AccessControlException, - IOException { + public FileStatus[] listStatus(final Path f) throws IOException { checkPathIsSlash(f); FileStatus[] fallbackStatuses = listStatusForFallbackLink(); - FileStatus[] result = new FileStatus[theInternalDir.getChildren().size()]; + Set linkStatuses = new HashSet<>(); + Set internalDirStatuses = new HashSet<>(); int i = 0; for (Entry> iEntry : theInternalDir.getChildren().entrySet()) { INode inode = iEntry.getValue(); - - + Path path = new Path(inode.fullPath).makeQualified(myUri, null); if (inode.isLink()) { INodeLink link = (INodeLink) inode; - result[i++] = new FileStatus(0, false, 0, 0, - creationTime, creationTime, - PERMISSION_555, ugi.getShortUserName(), ugi.getPrimaryGroupName(), - link.getTargetLink(), - new Path(inode.fullPath).makeQualified( - myUri, null)); + if (showMountLinksAsSymlinks) { + // To maintain backward compatibility with the default option (showing + // mount links as symlinks), we represent the target link as a + // symlink; the rest of the properties belong to the mount link only. + linkStatuses.add( + new FileStatus(0, false, 0, 0, creationTime, creationTime, + PERMISSION_555, ugi.getShortUserName(), + ugi.getPrimaryGroupName(), link.getTargetLink(), path)); + continue; + } + + // We represent the mount link as a non-symlink, showing the target + // directory/file properties, like permissions and isDirectory, on the + // mount path. The path will be a mount link path, and isDirectory is + // true if the target is a dir, otherwise false. + String linkedPath = link.getTargetFileSystem().getUri().getPath(); + if ("".equals(linkedPath)) { + linkedPath = "/"; + } + try { + FileStatus status = + ((ChRootedFs) link.getTargetFileSystem()).getMyFs() + .getFileStatus(new Path(linkedPath)); + linkStatuses.add( + new FileStatus(status.getLen(), status.isDirectory(), + status.getReplication(), status.getBlockSize(), + status.getModificationTime(), status.getAccessTime(), + status.getPermission(), status.getOwner(), + status.getGroup(), null, path)); + } catch (FileNotFoundException ex) { + LOG.warn("Cannot get one of the children's(" + path + + ") target path(" + link.getTargetFileSystem().getUri() + + ") file status.", ex); + throw ex; + } } else { - result[i++] = new FileStatus(0, true, 0, 0, - creationTime, creationTime, - PERMISSION_555, ugi.getShortUserName(), ugi.getGroupNames()[0], - new Path(inode.fullPath).makeQualified( - myUri, null)); + internalDirStatuses.add( + new FileStatus(0, true, 0, 0, creationTime, creationTime, + PERMISSION_555, ugi.getShortUserName(), + ugi.getPrimaryGroupName(), path)); } } + + FileStatus[] internalDirStatusesMergedWithFallBack = internalDirStatuses .toArray(new FileStatus[internalDirStatuses.size()]); if (fallbackStatuses.length > 0) { - return consolidateFileStatuses(fallbackStatuses, result); - } else { - return result; + internalDirStatusesMergedWithFallBack = + merge(fallbackStatuses, internalDirStatusesMergedWithFallBack); } + + // Links always take precedence over internalDir or fallback paths. 
+ return merge(linkStatuses.toArray(new FileStatus[linkStatuses.size()]), + internalDirStatusesMergedWithFallBack); } - private FileStatus[] consolidateFileStatuses(FileStatus[] fallbackStatuses, - FileStatus[] mountPointStatuses) { + private FileStatus[] merge(FileStatus[] toStatuses, + FileStatus[] fromStatuses) { ArrayList result = new ArrayList<>(); Set pathSet = new HashSet<>(); - for (FileStatus status : mountPointStatuses) { + for (FileStatus status : toStatuses) { result.add(status); pathSet.add(status.getPath().getName()); } - for (FileStatus status : fallbackStatuses) { + for (FileStatus status : fromStatuses) { if (!pathSet.contains(status.getPath().getName())) { result.add(status); } } - return result.toArray(new FileStatus[0]); + return result.toArray(new FileStatus[result.size()]); } private FileStatus[] listStatusForFallbackLink() throws IOException { - if (theInternalDir.isRoot() && - theInternalDir.getFallbackLink() != null) { - AbstractFileSystem linkedFs = - theInternalDir.getFallbackLink().getTargetFileSystem(); - // Fallback link is only applicable for root - FileStatus[] statuses = linkedFs.listStatus(new Path("/")); - for (FileStatus status : statuses) { - // Fix the path back to viewfs scheme - status.setPath( - new Path(myUri.toString(), status.getPath().getName())); + if (fsState.getRootFallbackLink() != null) { + AbstractFileSystem linkedFallbackFs = + fsState.getRootFallbackLink().getTargetFileSystem(); + Path p = Path.getPathWithoutSchemeAndAuthority( + new Path(theInternalDir.fullPath)); + if (theInternalDir.isRoot() || FileContext + .getFileContext(linkedFallbackFs, conf).util().exists(p)) { + // Fallback link is only applicable for root + FileStatus[] statuses = linkedFallbackFs.listStatus(p); + for (FileStatus status : statuses) { + // Fix the path back to viewfs scheme + Path pathFromConfiguredFallbackRoot = + new Path(p, status.getPath().getName()); + status.setPath( + new Path(myUri.toString(), pathFromConfiguredFallbackRoot)); + } + return statuses; } - return statuses; - } else { - return new FileStatus[0]; } + return new FileStatus[0]; } @Override public void mkdir(final Path dir, final FsPermission permission, - final boolean createParent) throws AccessControlException, - FileAlreadyExistsException { + final boolean createParent) throws IOException { if (theInternalDir.isRoot() && dir == null) { throw new FileAlreadyExistsException("/ already exits"); } + + if (this.fsState.getRootFallbackLink() != null) { + AbstractFileSystem linkedFallbackFs = + this.fsState.getRootFallbackLink().getTargetFileSystem(); + Path parent = Path.getPathWithoutSchemeAndAuthority( + new Path(theInternalDir.fullPath)); + String leafChild = (InodeTree.SlashPath.equals(dir)) ? + InodeTree.SlashPath.toString() : + dir.getName(); + Path dirToCreate = new Path(parent, leafChild); + try { + // We are here because the parent dir already exists in the mount + // table's internal tree. So, always create the parent in the fallback. 
+ linkedFallbackFs.mkdir(dirToCreate, permission, true); + return; + } catch (IOException e) { + if (LOG.isDebugEnabled()) { + StringBuilder msg = new StringBuilder("Failed to create {}") + .append(" at fallback fs : {}"); + LOG.debug(msg.toString(), dirToCreate, linkedFallbackFs.getUri()); + } + throw e; + } + } + throw readOnlyMountTable("mkdir", dir); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java index 4fc52d557cf9d..5ad71f373f2d0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java @@ -213,7 +213,7 @@ public void failover(HAServiceTarget fromSvc, // Fence fromSvc if it's required or forced by the user if (tryFence) { - if (!fromSvc.getFencer().fence(fromSvc)) { + if (!fromSvc.getFencer().fence(fromSvc, toSvc)) { throw new FailoverFailedException("Unable to fence " + fromSvc + ". Fencing failed."); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java index 0950ea7e01c57..34e37650ade1c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java @@ -19,9 +19,9 @@ import java.io.IOException; import java.io.PrintStream; -import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.Map; import org.apache.commons.cli.Options; @@ -107,8 +107,7 @@ protected HAAdmin(Configuration conf) { protected abstract HAServiceTarget resolveTarget(String string); protected Collection getTargetIds(String targetNodeToActivate) { - return new ArrayList( - Arrays.asList(new String[]{targetNodeToActivate})); + return Collections.singleton(targetNodeToActivate); } protected String getUsageString() { @@ -188,8 +187,10 @@ private int transitionToActive(final CommandLine cmd) private boolean isOtherTargetNodeActive(String targetNodeToActivate, boolean forceActive) throws IOException { Collection targetIds = getTargetIds(targetNodeToActivate); - targetIds.remove(targetNodeToActivate); - for(String targetId : targetIds) { + for (String targetId : targetIds) { + if (targetNodeToActivate.equals(targetId)) { + continue; + } HAServiceTarget target = resolveTarget(targetId); if (!checkManualStateManagementOK(target)) { return true; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceTarget.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceTarget.java index 9d5c8e7b7ea3b..ff9658f1bbc03 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceTarget.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceTarget.java @@ -44,6 +44,12 @@ public abstract class HAServiceTarget { private static final String PORT_SUBST_KEY = "port"; private static final String ADDRESS_SUBST_KEY = "address"; + /** + * The HAState this service target is intended to be after transition + * is complete. + */ + private HAServiceProtocol.HAServiceState transitionTargetHAStatus; + /** * @return the IPC address of the target node. 
*/ @@ -93,6 +99,15 @@ public HAServiceProtocol getProxy(Configuration conf, int timeoutMs) return getProxyForAddress(conf, timeoutMs, getAddress()); } + public void setTransitionTargetHAStatus( + HAServiceProtocol.HAServiceState status) { + this.transitionTargetHAStatus = status; + } + + public HAServiceProtocol.HAServiceState getTransitionTargetHAStatus() { + return this.transitionTargetHAStatus; + } + /** * Returns a proxy to connect to the target HA service for health monitoring. * If {@link #getHealthMonitorAddress()} is implemented to return a non-null diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java index 64e7315130257..b0cead56ac0e7 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java @@ -89,15 +89,32 @@ public static NodeFencer create(Configuration conf, String confKey) } public boolean fence(HAServiceTarget fromSvc) { + return fence(fromSvc, null); + } + + public boolean fence(HAServiceTarget fromSvc, HAServiceTarget toSvc) { LOG.info("====== Beginning Service Fencing Process... ======"); int i = 0; for (FenceMethodWithArg method : methods) { LOG.info("Trying method " + (++i) + "/" + methods.size() +": " + method); try { - if (method.method.tryFence(fromSvc, method.arg)) { - LOG.info("====== Fencing successful by method " + method + " ======"); - return true; + // only true when target node is given, AND fencing on it failed + boolean toSvcFencingFailed = false; + // if target is given, try to fence on target first. Only if fencing + // on target succeeded, do fencing on source node. 
+ if (toSvc != null) { + toSvcFencingFailed = !method.method.tryFence(toSvc, method.arg); + } + if (toSvcFencingFailed) { + LOG.error("====== Fencing on target failed, skipping fencing " + + "on source ======"); + } else { + if (method.method.tryFence(fromSvc, method.arg)) { + LOG.info("====== Fencing successful by method " + + method + " ======"); + return true; + } } } catch (BadFencingConfigurationException e) { LOG.error("Fencing method " + method + " misconfigured", e); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ShellCommandFencer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ShellCommandFencer.java index 7e4a88f729fad..6363063abf2e2 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ShellCommandFencer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ShellCommandFencer.java @@ -19,6 +19,7 @@ import java.io.IOException; import java.lang.reflect.Field; +import java.util.Arrays; import java.util.Map; import org.apache.hadoop.conf.Configured; @@ -60,6 +61,11 @@ public class ShellCommandFencer /** Prefix for target parameters added to the environment */ private static final String TARGET_PREFIX = "target_"; + /** Prefix for source parameters added to the environment */ + private static final String SOURCE_PREFIX = "source_"; + + private static final String ARG_DELIMITER = ","; + @VisibleForTesting static Logger LOG = LoggerFactory.getLogger(ShellCommandFencer.class); @@ -73,8 +79,9 @@ public void checkArgs(String args) throws BadFencingConfigurationException { } @Override - public boolean tryFence(HAServiceTarget target, String cmd) { + public boolean tryFence(HAServiceTarget target, String args) { ProcessBuilder builder; + String cmd = parseArgs(target.getTransitionTargetHAStatus(), args); if (!Shell.WINDOWS) { builder = new ProcessBuilder("bash", "-e", "-c", cmd); @@ -127,6 +134,28 @@ public boolean tryFence(HAServiceTarget target, String cmd) { return rc == 0; } + private String parseArgs(HAServiceProtocol.HAServiceState state, + String cmd) { + String[] args = cmd.split(ARG_DELIMITER); + if (args.length == 1) { + // Only one command is given, so both source and destination + // will execute the same command/script. + return args[0]; + } + if (args.length > 2) { + throw new IllegalArgumentException("Expecting at most two " + + "arguments, got " + Arrays.asList(args)); + } + if (HAServiceProtocol.HAServiceState.ACTIVE.equals(state)) { + return args[0]; + } else if (HAServiceProtocol.HAServiceState.STANDBY.equals(state)) { + return args[1]; + } else { + throw new IllegalArgumentException( + "Unexpected HA service state:" + state); + } + } + /** * Abbreviate a string by putting '...' in the middle of it, * in an attempt to keep logs from getting too messy. @@ -190,9 +219,24 @@ private void setConfAsEnvVars(Map env) { */ private void addTargetInfoAsEnvVars(HAServiceTarget target, Map environment) { + String prefix; + HAServiceProtocol.HAServiceState targetState = + target.getTransitionTargetHAStatus(); + if (targetState == null || + HAServiceProtocol.HAServiceState.ACTIVE.equals(targetState)) { + // null is treated the same as ACTIVE; this keeps compatibility + // with existing tests/use cases where the target state is not + // specified but assumed to be active. 
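// A hedged, standalone sketch of the parseArgs(...) contract above; it is
// not part of this patch. The script names are hypothetical, e.g. a fencing
// configuration of the form shell(fence-active.sh,fence-standby.sh): the
// first command runs when the transition target state is ACTIVE, the second
// when it is STANDBY, and a single comma-free command serves both.

    import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;

    public class FenceArgsSketch {
      static String pick(HAServiceState state, String args) {
        String[] parts = args.split(",");
        if (parts.length == 1) {
          return parts[0]; // one command serves both transition directions
        }
        return HAServiceState.ACTIVE.equals(state) ? parts[0] : parts[1];
      }

      public static void main(String[] a) {
        // Prints "fence-active.sh" for an ACTIVE transition target.
        System.out.println(pick(HAServiceState.ACTIVE,
            "fence-active.sh,fence-standby.sh"));
      }
    }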
+ prefix = TARGET_PREFIX; + } else if (HAServiceProtocol.HAServiceState.STANDBY.equals(targetState)) { + prefix = SOURCE_PREFIX; + } else { + throw new IllegalArgumentException( + "Unexpected HA service state:" + targetState); + } for (Map.Entry e : target.getFencingParameters().entrySet()) { - String key = TARGET_PREFIX + e.getKey(); + String key = prefix + e.getKey(); key = key.replace('.', '_'); environment.put(key, e.getValue()); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFCRpcServer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFCRpcServer.java index 61ea53c420ab1..09161c745dc06 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFCRpcServer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFCRpcServer.java @@ -28,7 +28,7 @@ import org.apache.hadoop.ha.proto.ZKFCProtocolProtos.ZKFCProtocolService; import org.apache.hadoop.ha.protocolPB.ZKFCProtocolPB; import org.apache.hadoop.ha.protocolPB.ZKFCProtocolServerSideTranslatorPB; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RPC.Server; import org.apache.hadoop.security.AccessControlException; @@ -51,7 +51,7 @@ public class ZKFCRpcServer implements ZKFCProtocol { this.zkfc = zkfc; RPC.setProtocolEngine(conf, ZKFCProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); ZKFCProtocolServerSideTranslatorPB translator = new ZKFCProtocolServerSideTranslatorPB(this); BlockingService service = ZKFCProtocolService diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolClientSideTranslatorPB.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolClientSideTranslatorPB.java index e53820cd13107..2cbfd0d0ec030 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolClientSideTranslatorPB.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolClientSideTranslatorPB.java @@ -38,7 +38,7 @@ import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto; import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto; import org.apache.hadoop.ipc.ProtobufHelper; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.ProtocolTranslator; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.security.UserGroupInformation; @@ -67,7 +67,7 @@ public class HAServiceProtocolClientSideTranslatorPB implements public HAServiceProtocolClientSideTranslatorPB(InetSocketAddress addr, Configuration conf) throws IOException { RPC.setProtocolEngine(conf, HAServiceProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); rpcProxy = RPC.getProxy(HAServiceProtocolPB.class, RPC.getProtocolVersion(HAServiceProtocolPB.class), addr, conf); } @@ -76,7 +76,7 @@ public HAServiceProtocolClientSideTranslatorPB( InetSocketAddress addr, Configuration conf, SocketFactory socketFactory, int timeout) throws IOException { RPC.setProtocolEngine(conf, HAServiceProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); rpcProxy = RPC.getProxy(HAServiceProtocolPB.class, RPC.getProtocolVersion(HAServiceProtocolPB.class), addr, 
UserGroupInformation.getCurrentUser(), conf, socketFactory, timeout); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolClientSideTranslatorPB.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolClientSideTranslatorPB.java index 7001d93995f0f..3777207c7e45c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolClientSideTranslatorPB.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolClientSideTranslatorPB.java @@ -28,7 +28,7 @@ import org.apache.hadoop.ha.proto.ZKFCProtocolProtos.CedeActiveRequestProto; import org.apache.hadoop.ha.proto.ZKFCProtocolProtos.GracefulFailoverRequestProto; import org.apache.hadoop.ipc.ProtobufHelper; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.ProtocolTranslator; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.security.AccessControlException; @@ -48,7 +48,7 @@ public ZKFCProtocolClientSideTranslatorPB( InetSocketAddress addr, Configuration conf, SocketFactory socketFactory, int timeout) throws IOException { RPC.setProtocolEngine(conf, ZKFCProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); rpcProxy = RPC.getProxy(ZKFCProtocolPB.class, RPC.getProtocolVersion(ZKFCProtocolPB.class), addr, UserGroupInformation.getCurrentUser(), conf, socketFactory, timeout); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java index 3fd74f0e89a27..8b69d57e8120e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java @@ -1346,7 +1346,11 @@ private void bindForPortRange(ServerConnector listener, int startPort) try { bindListener(listener); return; - } catch (BindException ex) { + } catch (IOException ex) { + if (!(ex instanceof BindException) + && !(ex.getCause() instanceof BindException)) { + throw ex; + } // Ignore exception. Move to next port. 
ioException = ex; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/lib/StaticUserWebFilter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/lib/StaticUserWebFilter.java index fc64697bb8c75..915427f8e1845 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/lib/StaticUserWebFilter.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/lib/StaticUserWebFilter.java @@ -19,7 +19,7 @@ import java.io.IOException; import java.security.Principal; -import java.util.HashMap; +import java.util.Collections; import javax.servlet.FilterChain; import javax.servlet.FilterConfig; @@ -121,14 +121,10 @@ public void init(FilterConfig conf) throws ServletException { @Override public void initFilter(FilterContainer container, Configuration conf) { - HashMap options = new HashMap(); - String username = getUsernameFromConf(conf); - options.put(HADOOP_HTTP_STATIC_USER, username); - container.addFilter("static_user_filter", - StaticUserFilter.class.getName(), - options); + container.addFilter("static_user_filter", StaticUserFilter.class.getName(), + Collections.singletonMap(HADOOP_HTTP_STATIC_USER, username)); } /** diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SecureIOUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SecureIOUtils.java index 9d3c3c1ceeaa7..f14d99227c7cc 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SecureIOUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SecureIOUtils.java @@ -272,7 +272,7 @@ private static void checkStat(File f, String owner, String group, UserGroupInformation.createRemoteUser(expectedOwner); final String adminsGroupString = "Administrators"; success = owner.equals(adminsGroupString) - && ugi.getGroups().contains(adminsGroupString); + && ugi.getGroupsSet().contains(adminsGroupString); } else { success = false; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java index 64824a15cd89c..6db00d724aa35 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java @@ -35,6 +35,7 @@ import java.lang.reflect.Proxy; import java.util.Arrays; import java.util.Collections; +import java.util.HashSet; import java.util.Map; /** @@ -312,6 +313,8 @@ public String toString() { private volatile boolean hasSuccessfulCall = false; + private HashSet failedAtLeastOnce = new HashSet<>(); + private final RetryPolicy defaultPolicy; private final Map methodNameToPolicyMap; @@ -390,12 +393,18 @@ private RetryInfo handleException(final Method method, final int callId, private void log(final Method method, final boolean isFailover, final int failovers, final long delay, final Exception ex) { - // log info if this has made some successful calls or - // this is not the first failover - final boolean info = hasSuccessfulCall || failovers != 0 - || asyncCallHandler.hasSuccessfulCall(); - if (!info && !LOG.isDebugEnabled()) { - return; + boolean info = true; + // If this is the first failover to this proxy, skip logging at INFO level + if 
(!failedAtLeastOnce.contains(proxyDescriptor.getProxyInfo().toString())) + { + failedAtLeastOnce.add(proxyDescriptor.getProxyInfo().toString()); + + // If successful calls were made to this proxy, log info even for first + // failover + info = hasSuccessfulCall || asyncCallHandler.hasSuccessfulCall(); + if (!info && !LOG.isDebugEnabled()) { + return; + } } final StringBuilder b = new StringBuilder() diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java index 688eed647c209..6240f859cf786 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java @@ -649,6 +649,7 @@ private synchronized boolean updateAddress() throws IOException { private synchronized void setupConnection( UserGroupInformation ticket) throws IOException { + LOG.debug("Setup connection to " + server.toString()); short ioFailures = 0; short timeoutFailures = 0; while (true) { @@ -711,8 +712,16 @@ private synchronized void setupConnection( } catch (IOException ie) { if (updateAddress()) { timeoutFailures = ioFailures = 0; + try { + // HADOOP-17068: when server changed, ignore the exception. + handleConnectionFailure(ioFailures++, ie); + } catch (IOException ioe) { + LOG.warn("Exception when handle ConnectionFailure: " + + ioe.getMessage()); + } + } else { + handleConnectionFailure(ioFailures++, ie); } - handleConnectionFailure(ioFailures++, ie); } } } @@ -1277,7 +1286,7 @@ private synchronized void close() { cleanupCalls(); } } else { - // log the info + // Log the newest server information if update address. if (LOG.isDebugEnabled()) { LOG.debug("closing ipc connection to " + server + ": " + closeException.getMessage(),closeException); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java index 3e952eb63c3ff..45cbd4e99dff8 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java @@ -43,6 +43,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.ipc.metrics.DecayRpcSchedulerDetailedMetrics; +import org.apache.hadoop.ipc.metrics.RpcMetrics; import org.apache.hadoop.metrics2.MetricsCollector; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.MetricsSource; @@ -632,8 +633,8 @@ public void addResponseTime(String callName, Schedulable schedulable, addCost(user, processingCost); int priorityLevel = schedulable.getPriorityLevel(); - long queueTime = details.get(Timing.QUEUE, TimeUnit.MILLISECONDS); - long processingTime = details.get(Timing.PROCESSING, TimeUnit.MILLISECONDS); + long queueTime = details.get(Timing.QUEUE, RpcMetrics.TIMEUNIT); + long processingTime = details.get(Timing.PROCESSING, RpcMetrics.TIMEUNIT); this.decayRpcSchedulerDetailedMetrics.addQueueTime( priorityLevel, queueTime); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufHelper.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufHelper.java index bb86cfc35bf4e..1e110b9011313 100644 --- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufHelper.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufHelper.java @@ -53,6 +53,23 @@ public static IOException getRemoteException(ServiceException se) { return e instanceof IOException ? (IOException) e : new IOException(se); } + /** + * Kept for backward compatible. + * Return the IOException thrown by the remote server wrapped in + * ServiceException as cause. + * @param se ServiceException that wraps IO exception thrown by the server + * @return Exception wrapped in ServiceException or + * a new IOException that wraps the unexpected ServiceException. + */ + @Deprecated + public static IOException getRemoteException( + com.google.protobuf.ServiceException se) { + Throwable e = se.getCause(); + if (e == null) { + return new IOException(se); + } + return e instanceof IOException ? (IOException) e : new IOException(se); + } /** * Map used to cache fixed strings to ByteStrings. Since there is no diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java index 14b356f847acf..220ad1ded9fec 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java @@ -19,8 +19,11 @@ package org.apache.hadoop.ipc; import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.thirdparty.protobuf.*; -import org.apache.hadoop.thirdparty.protobuf.Descriptors.MethodDescriptor; +import com.google.protobuf.BlockingService; +import com.google.protobuf.Descriptors.MethodDescriptor; +import com.google.protobuf.Message; +import com.google.protobuf.ServiceException; +import com.google.protobuf.TextFormat; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability.Unstable; @@ -29,6 +32,7 @@ import org.apache.hadoop.io.retry.RetryPolicy; import org.apache.hadoop.ipc.Client.ConnectionId; import org.apache.hadoop.ipc.RPC.RpcInvoker; +import org.apache.hadoop.ipc.RPC.RpcKind; import org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.SecretManager; @@ -52,7 +56,10 @@ /** * RPC Engine for for protobuf based RPCs. + * This engine uses Protobuf 2.5.0. Recommended to upgrade to Protobuf 3.x + * from hadoop-thirdparty and use ProtobufRpcEngine2. 
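The deprecated getRemoteException overload above keeps translators that still pass com.google.protobuf.ServiceException compiling while they migrate to the hadoop-thirdparty types. Either overload is used the same way; a minimal sketch of the standard translator pattern (the protocol interface here is hypothetical):

    import java.io.IOException;

    import com.google.protobuf.ServiceException;
    import org.apache.hadoop.ipc.ProtobufHelper;

    public class ExampleTranslator {
      /** Stand-in for a protoc-generated blocking stub (hypothetical). */
      public interface ExampleProtocolPB {
        void ping() throws ServiceException;
      }

      private final ExampleProtocolPB rpcProxy;

      public ExampleTranslator(ExampleProtocolPB rpcProxy) {
        this.rpcProxy = rpcProxy;
      }

      public void ping() throws IOException {
        try {
          rpcProxy.ping();
        } catch (ServiceException e) {
          // Rethrows the server's IOException when it is the cause;
          // otherwise wraps the unexpected ServiceException.
          throw ProtobufHelper.getRemoteException(e);
        }
      }
    }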
*/ +@Deprecated @InterfaceStability.Evolving public class ProtobufRpcEngine implements RpcEngine { public static final Logger LOG = @@ -355,6 +362,7 @@ public static class Server extends RPC.Server { new ThreadLocal<>(); static final ThreadLocal currentCallInfo = new ThreadLocal<>(); + private static final RpcInvoker RPC_INVOKER = new ProtoBufRpcInvoker(); static class CallInfo { private final RPC.Server server; @@ -433,7 +441,15 @@ public Server(Class protocolClass, Object protocolImpl, registerProtocolAndImpl(RPC.RpcKind.RPC_PROTOCOL_BUFFER, protocolClass, protocolImpl); } - + + @Override + protected RpcInvoker getServerRpcInvoker(RpcKind rpcKind) { + if (rpcKind == RpcKind.RPC_PROTOCOL_BUFFER) { + return RPC_INVOKER; + } + return super.getServerRpcInvoker(rpcKind); + } + /** * Protobuf invoker for {@link RpcInvoker} */ diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine2.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine2.java new file mode 100644 index 0000000000000..30315343962c8 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine2.java @@ -0,0 +1,598 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ipc; + +import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.protobuf.*; +import org.apache.hadoop.thirdparty.protobuf.Descriptors.MethodDescriptor; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.retry.RetryPolicy; +import org.apache.hadoop.ipc.Client.ConnectionId; +import org.apache.hadoop.ipc.RPC.RpcInvoker; +import org.apache.hadoop.ipc.protobuf.ProtobufRpcEngine2Protos.RequestHeaderProto; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.token.SecretManager; +import org.apache.hadoop.security.token.TokenIdentifier; +import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.AsyncGet; +import org.apache.htrace.core.TraceScope; +import org.apache.htrace.core.Tracer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.net.SocketFactory; +import java.io.IOException; +import java.lang.reflect.Method; +import java.lang.reflect.Proxy; +import java.net.InetSocketAddress; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * RPC Engine for for protobuf based RPCs. 
+ */ +@InterfaceStability.Evolving +public class ProtobufRpcEngine2 implements RpcEngine { + public static final Logger LOG = + LoggerFactory.getLogger(ProtobufRpcEngine2.class); + private static final ThreadLocal> + ASYNC_RETURN_MESSAGE = new ThreadLocal<>(); + + static { // Register the rpcRequest deserializer for ProtobufRpcEngine + org.apache.hadoop.ipc.Server.registerProtocolEngine( + RPC.RpcKind.RPC_PROTOCOL_BUFFER, RpcProtobufRequest.class, + new Server.ProtoBufRpcInvoker()); + } + + private static final ClientCache CLIENTS = new ClientCache(); + + @Unstable + public static AsyncGet getAsyncReturnMessage() { + return ASYNC_RETURN_MESSAGE.get(); + } + + public ProtocolProxy getProxy(Class protocol, long clientVersion, + InetSocketAddress addr, UserGroupInformation ticket, Configuration conf, + SocketFactory factory, int rpcTimeout) throws IOException { + return getProxy(protocol, clientVersion, addr, ticket, conf, factory, + rpcTimeout, null); + } + + @Override + public ProtocolProxy getProxy( + Class protocol, long clientVersion, + InetSocketAddress addr, UserGroupInformation ticket, Configuration conf, + SocketFactory factory, int rpcTimeout, RetryPolicy connectionRetryPolicy) + throws IOException { + return getProxy(protocol, clientVersion, addr, ticket, conf, factory, + rpcTimeout, connectionRetryPolicy, null, null); + } + + @Override + @SuppressWarnings("unchecked") + public ProtocolProxy getProxy(Class protocol, long clientVersion, + InetSocketAddress addr, UserGroupInformation ticket, Configuration conf, + SocketFactory factory, int rpcTimeout, RetryPolicy connectionRetryPolicy, + AtomicBoolean fallbackToSimpleAuth, AlignmentContext alignmentContext) + throws IOException { + + final Invoker invoker = new Invoker(protocol, addr, ticket, conf, factory, + rpcTimeout, connectionRetryPolicy, fallbackToSimpleAuth, + alignmentContext); + return new ProtocolProxy(protocol, (T) Proxy.newProxyInstance( + protocol.getClassLoader(), new Class[]{protocol}, invoker), false); + } + + @Override + public ProtocolProxy getProtocolMetaInfoProxy( + ConnectionId connId, Configuration conf, SocketFactory factory) + throws IOException { + Class protocol = ProtocolMetaInfoPB.class; + return new ProtocolProxy(protocol, + (ProtocolMetaInfoPB) Proxy.newProxyInstance(protocol.getClassLoader(), + new Class[]{protocol}, new Invoker(protocol, connId, conf, + factory)), false); + } + + private static final class Invoker implements RpcInvocationHandler { + private final Map returnTypes = + new ConcurrentHashMap(); + private boolean isClosed = false; + private final Client.ConnectionId remoteId; + private final Client client; + private final long clientProtocolVersion; + private final String protocolName; + private AtomicBoolean fallbackToSimpleAuth; + private AlignmentContext alignmentContext; + + private Invoker(Class protocol, InetSocketAddress addr, + UserGroupInformation ticket, Configuration conf, SocketFactory factory, + int rpcTimeout, RetryPolicy connectionRetryPolicy, + AtomicBoolean fallbackToSimpleAuth, AlignmentContext alignmentContext) + throws IOException { + this(protocol, Client.ConnectionId.getConnectionId( + addr, protocol, ticket, rpcTimeout, connectionRetryPolicy, conf), + conf, factory); + this.fallbackToSimpleAuth = fallbackToSimpleAuth; + this.alignmentContext = alignmentContext; + } + + /** + * This constructor takes a connectionId, instead of creating a new one. 
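Before any proxy is built, a client picks the engine per protocol, which is exactly the substitution this patch performs across the code base. A minimal sketch, assuming only that the protocol class is on the classpath:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ipc.ProtobufRpcEngine2;
    import org.apache.hadoop.ipc.RPC;

    public final class EngineSelection {
      private EngineSelection() {
      }

      /** Route RPCs for 'protocol' through the hadoop-thirdparty engine. */
      public static void useEngine2(Configuration conf, Class<?> protocol) {
        RPC.setProtocolEngine(conf, protocol, ProtobufRpcEngine2.class);
      }
    }

Code that still exchanges com.google.protobuf messages stays on ProtobufRpcEngine, which this patch deprecates rather than removes.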
+ */ + private Invoker(Class protocol, Client.ConnectionId connId, + Configuration conf, SocketFactory factory) { + this.remoteId = connId; + this.client = CLIENTS.getClient(conf, factory, RpcWritable.Buffer.class); + this.protocolName = RPC.getProtocolName(protocol); + this.clientProtocolVersion = RPC + .getProtocolVersion(protocol); + } + + private RequestHeaderProto constructRpcRequestHeader(Method method) { + RequestHeaderProto.Builder builder = RequestHeaderProto + .newBuilder(); + builder.setMethodName(method.getName()); + + + // For protobuf, {@code protocol} used when creating client side proxy is + // the interface extending BlockingInterface, which has the annotations + // such as ProtocolName etc. + // + // Using Method.getDeclaringClass(), as in WritableEngine to get at + // the protocol interface will return BlockingInterface, from where + // the annotation ProtocolName and Version cannot be + // obtained. + // + // Hence we simply use the protocol class used to create the proxy. + // For PB this may limit the use of mixins on client side. + builder.setDeclaringClassProtocolName(protocolName); + builder.setClientProtocolVersion(clientProtocolVersion); + return builder.build(); + } + + /** + * This is the client side invoker of RPC method. It only throws + * ServiceException, since the invocation proxy expects only + * ServiceException to be thrown by the method in case protobuf service. + * + * ServiceException has the following causes: + *
+ * <ol>
+ * <li>Exceptions encountered on the client side in this method are
+ * set as cause in ServiceException as is.</li>
+ * <li>Exceptions from the server are wrapped in RemoteException and are
+ * set as cause in ServiceException.</li>
+ * </ol>
+ * + * Note that the client calling protobuf RPC methods, must handle + * ServiceException by getting the cause from the ServiceException. If the + * cause is RemoteException, then unwrap it to get the exception thrown by + * the server. + */ + @Override + public Message invoke(Object proxy, final Method method, Object[] args) + throws ServiceException { + long startTime = 0; + if (LOG.isDebugEnabled()) { + startTime = Time.now(); + } + + if (args.length != 2) { // RpcController + Message + throw new ServiceException( + "Too many or few parameters for request. Method: [" + + method.getName() + "]" + ", Expected: 2, Actual: " + + args.length); + } + if (args[1] == null) { + throw new ServiceException("null param while calling Method: [" + + method.getName() + "]"); + } + + // if Tracing is on then start a new span for this rpc. + // guard it in the if statement to make sure there isn't + // any extra string manipulation. + Tracer tracer = Tracer.curThreadTracer(); + TraceScope traceScope = null; + if (tracer != null) { + traceScope = tracer.newScope(RpcClientUtil.methodToTraceString(method)); + } + + RequestHeaderProto rpcRequestHeader = constructRpcRequestHeader(method); + + if (LOG.isTraceEnabled()) { + LOG.trace(Thread.currentThread().getId() + ": Call -> " + + remoteId + ": " + method.getName() + + " {" + TextFormat.shortDebugString((Message) args[1]) + "}"); + } + + + final Message theRequest = (Message) args[1]; + final RpcWritable.Buffer val; + try { + val = (RpcWritable.Buffer) client.call(RPC.RpcKind.RPC_PROTOCOL_BUFFER, + new RpcProtobufRequest(rpcRequestHeader, theRequest), remoteId, + fallbackToSimpleAuth, alignmentContext); + + } catch (Throwable e) { + if (LOG.isTraceEnabled()) { + LOG.trace(Thread.currentThread().getId() + ": Exception <- " + + remoteId + ": " + method.getName() + + " {" + e + "}"); + } + if (traceScope != null) { + traceScope.addTimelineAnnotation("Call got exception: " + + e.toString()); + } + throw new ServiceException(e); + } finally { + if (traceScope != null) { + traceScope.close(); + } + } + + if (LOG.isDebugEnabled()) { + long callTime = Time.now() - startTime; + LOG.debug("Call: " + method.getName() + " took " + callTime + "ms"); + } + + if (Client.isAsynchronousMode()) { + final AsyncGet arr + = Client.getAsyncRpcResponse(); + final AsyncGet asyncGet = + new AsyncGet() { + @Override + public Message get(long timeout, TimeUnit unit) throws Exception { + return getReturnMessage(method, arr.get(timeout, unit)); + } + + @Override + public boolean isDone() { + return arr.isDone(); + } + }; + ASYNC_RETURN_MESSAGE.set(asyncGet); + return null; + } else { + return getReturnMessage(method, val); + } + } + + private Message getReturnMessage(final Method method, + final RpcWritable.Buffer buf) throws ServiceException { + Message prototype = null; + try { + prototype = getReturnProtoType(method); + } catch (Exception e) { + throw new ServiceException(e); + } + Message returnMessage; + try { + returnMessage = buf.getValue(prototype.getDefaultInstanceForType()); + + if (LOG.isTraceEnabled()) { + LOG.trace(Thread.currentThread().getId() + ": Response <- " + + remoteId + ": " + method.getName() + + " {" + TextFormat.shortDebugString(returnMessage) + "}"); + } + + } catch (Throwable e) { + throw new ServiceException(e); + } + return returnMessage; + } + + @Override + public void close() throws IOException { + if (!isClosed) { + isClosed = true; + CLIENTS.stopClient(client); + } + } + + private Message getReturnProtoType(Method method) throws Exception { + if 
(returnTypes.containsKey(method.getName())) { + return returnTypes.get(method.getName()); + } + + Class returnType = method.getReturnType(); + Method newInstMethod = returnType.getMethod("getDefaultInstance"); + newInstMethod.setAccessible(true); + Message prototype = (Message) newInstMethod.invoke(null, (Object[]) null); + returnTypes.put(method.getName(), prototype); + return prototype; + } + + @Override //RpcInvocationHandler + public ConnectionId getConnectionId() { + return remoteId; + } + } + + @VisibleForTesting + @InterfaceAudience.Private + @InterfaceStability.Unstable + static Client getClient(Configuration conf) { + return CLIENTS.getClient(conf, SocketFactory.getDefault(), + RpcWritable.Buffer.class); + } + + + + @Override + public RPC.Server getServer(Class protocol, Object protocolImpl, + String bindAddress, int port, int numHandlers, int numReaders, + int queueSizePerHandler, boolean verbose, Configuration conf, + SecretManager secretManager, + String portRangeConfig, AlignmentContext alignmentContext) + throws IOException { + return new Server(protocol, protocolImpl, conf, bindAddress, port, + numHandlers, numReaders, queueSizePerHandler, verbose, secretManager, + portRangeConfig, alignmentContext); + } + + public static class Server extends RPC.Server { + + static final ThreadLocal CURRENT_CALLBACK = + new ThreadLocal<>(); + + static final ThreadLocal CURRENT_CALL_INFO = new ThreadLocal<>(); + + static class CallInfo { + private final RPC.Server server; + private final String methodName; + + CallInfo(RPC.Server server, String methodName) { + this.server = server; + this.methodName = methodName; + } + } + + static class ProtobufRpcEngineCallbackImpl + implements ProtobufRpcEngineCallback2 { + + private final RPC.Server server; + private final Call call; + private final String methodName; + private final long setupTime; + + ProtobufRpcEngineCallbackImpl() { + this.server = CURRENT_CALL_INFO.get().server; + this.call = Server.getCurCall().get(); + this.methodName = CURRENT_CALL_INFO.get().methodName; + this.setupTime = Time.now(); + } + + @Override + public void setResponse(Message message) { + long processingTime = Time.now() - setupTime; + call.setDeferredResponse(RpcWritable.wrap(message)); + server.updateDeferredMetrics(methodName, processingTime); + } + + @Override + public void error(Throwable t) { + long processingTime = Time.now() - setupTime; + String detailedMetricsName = t.getClass().getSimpleName(); + server.updateDeferredMetrics(detailedMetricsName, processingTime); + call.setDeferredError(t); + } + } + + @InterfaceStability.Unstable + public static ProtobufRpcEngineCallback2 registerForDeferredResponse() { + ProtobufRpcEngineCallback2 callback = new ProtobufRpcEngineCallbackImpl(); + CURRENT_CALLBACK.set(callback); + return callback; + } + + /** + * Construct an RPC server. 
+ * + * @param protocolClass the class of protocol + * @param protocolImpl the protocolImpl whose methods will be called + * @param conf the configuration to use + * @param bindAddress the address to bind on to listen for connection + * @param port the port to listen for connections on + * @param numHandlers the number of method handler threads to run + * @param verbose whether each call should be logged + * @param portRangeConfig A config parameter that can be used to restrict + * the range of ports used when port is 0 (an ephemeral port) + * @param alignmentContext provides server state info on client responses + */ + public Server(Class protocolClass, Object protocolImpl, + Configuration conf, String bindAddress, int port, int numHandlers, + int numReaders, int queueSizePerHandler, boolean verbose, + SecretManager secretManager, + String portRangeConfig, AlignmentContext alignmentContext) + throws IOException { + super(bindAddress, port, null, numHandlers, + numReaders, queueSizePerHandler, conf, + serverNameFromClass(protocolImpl.getClass()), secretManager, + portRangeConfig); + setAlignmentContext(alignmentContext); + this.verbose = verbose; + registerProtocolAndImpl(RPC.RpcKind.RPC_PROTOCOL_BUFFER, protocolClass, + protocolImpl); + } + + /** + * Protobuf invoker for {@link RpcInvoker}. + */ + static class ProtoBufRpcInvoker implements RpcInvoker { + private static ProtoClassProtoImpl getProtocolImpl(RPC.Server server, + String protoName, long clientVersion) throws RpcServerException { + ProtoNameVer pv = new ProtoNameVer(protoName, clientVersion); + ProtoClassProtoImpl impl = + server.getProtocolImplMap(RPC.RpcKind.RPC_PROTOCOL_BUFFER).get(pv); + if (impl == null) { // no match for Protocol AND Version + VerProtocolImpl highest = server.getHighestSupportedProtocol( + RPC.RpcKind.RPC_PROTOCOL_BUFFER, protoName); + if (highest == null) { + throw new RpcNoSuchProtocolException( + "Unknown protocol: " + protoName); + } + // protocol supported but not the version that client wants + throw new RPC.VersionMismatch(protoName, clientVersion, + highest.version); + } + return impl; + } + + @Override + /** + * This is a server side method, which is invoked over RPC. On success + * the return response has protobuf response payload. On failure, the + * exception name and the stack trace are returned in the response. + * See {@link HadoopRpcResponseProto} + * + * In this method there three types of exceptions possible and they are + * returned in response as follows. + *
+ * <ol>
+ * <li>Exceptions encountered in this method that are returned
+ * as {@link RpcServerException}.</li>
+ * <li>Exceptions thrown by the service are wrapped in ServiceException;
+ * in that case, this method returns the service's exception in the
+ * response.</li>
+ * <li>Other exceptions thrown by the service are returned as is.</li>
+ * </ol>
+ */ + public Writable call(RPC.Server server, String connectionProtocolName, + Writable writableRequest, long receiveTime) throws Exception { + RpcProtobufRequest request = (RpcProtobufRequest) writableRequest; + RequestHeaderProto rpcRequest = request.getRequestHeader(); + String methodName = rpcRequest.getMethodName(); + + /** + * RPCs for a particular interface (ie protocol) are done using a + * IPC connection that is setup using rpcProxy. + * The rpcProxy's has a declared protocol name that is + * sent form client to server at connection time. + * + * Each Rpc call also sends a protocol name + * (called declaringClassprotocolName). This name is usually the same + * as the connection protocol name except in some cases. + * For example metaProtocols such ProtocolInfoProto which get info + * about the protocol reuse the connection but need to indicate that + * the actual protocol is different (i.e. the protocol is + * ProtocolInfoProto) since they reuse the connection; in this case + * the declaringClassProtocolName field is set to the ProtocolInfoProto. + */ + + String declaringClassProtoName = + rpcRequest.getDeclaringClassProtocolName(); + long clientVersion = rpcRequest.getClientProtocolVersion(); + if (server.verbose) { + LOG.info("Call: connectionProtocolName=" + connectionProtocolName + + ", method=" + methodName); + } + + ProtoClassProtoImpl protocolImpl = getProtocolImpl(server, + declaringClassProtoName, clientVersion); + BlockingService service = (BlockingService) protocolImpl.protocolImpl; + MethodDescriptor methodDescriptor = service.getDescriptorForType() + .findMethodByName(methodName); + if (methodDescriptor == null) { + String msg = "Unknown method " + methodName + " called on " + + connectionProtocolName + " protocol."; + LOG.warn(msg); + throw new RpcNoSuchMethodException(msg); + } + Message prototype = service.getRequestPrototype(methodDescriptor); + Message param = request.getValue(prototype); + + Message result; + Call currentCall = Server.getCurCall().get(); + try { + server.rpcDetailedMetrics.init(protocolImpl.protocolClass); + CURRENT_CALL_INFO.set(new CallInfo(server, methodName)); + currentCall.setDetailedMetricsName(methodName); + result = service.callBlockingMethod(methodDescriptor, null, param); + // Check if this needs to be a deferred response, + // by checking the ThreadLocal callback being set + if (CURRENT_CALLBACK.get() != null) { + currentCall.deferResponse(); + CURRENT_CALLBACK.set(null); + return null; + } + } catch (ServiceException e) { + Exception exception = (Exception) e.getCause(); + currentCall.setDetailedMetricsName( + exception.getClass().getSimpleName()); + throw (Exception) e.getCause(); + } catch (Exception e) { + currentCall.setDetailedMetricsName(e.getClass().getSimpleName()); + throw e; + } finally { + CURRENT_CALL_INFO.set(null); + } + return RpcWritable.wrap(result); + } + } + } + + // htrace in the ipc layer creates the span name based on toString() + // which uses the rpc header. in the normal case we want to defer decoding + // the rpc header until needed by the rpc engine. 
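As the comment above notes, the tracer names spans via toString(), so the request header is decoded lazily: parse on first access, cache, and reuse. The same idiom in isolation (an illustrative holder, not the Hadoop class):

    import java.util.function.Supplier;

    final class Lazy<T> {
      private final Supplier<T> parser;
      private volatile T value; // decoded on first access, then reused

      Lazy(Supplier<T> parser) {
        this.parser = parser;
      }

      T get() {
        T v = value;
        if (v == null) {
          synchronized (this) {
            v = value;
            if (v == null) {
              value = v = parser.get(); // parse at most once
            }
          }
        }
        return v;
      }
    }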
+ static class RpcProtobufRequest extends RpcWritable.Buffer { + private volatile RequestHeaderProto requestHeader; + private Message payload; + + RpcProtobufRequest() { + } + + RpcProtobufRequest(RequestHeaderProto header, Message payload) { + this.requestHeader = header; + this.payload = payload; + } + + RequestHeaderProto getRequestHeader() throws IOException { + if (getByteBuffer() != null && requestHeader == null) { + requestHeader = getValue(RequestHeaderProto.getDefaultInstance()); + } + return requestHeader; + } + + @Override + public void writeTo(ResponseBuffer out) throws IOException { + requestHeader.writeDelimitedTo(out); + if (payload != null) { + payload.writeDelimitedTo(out); + } + } + + // this is used by htrace to name the span. + @Override + public String toString() { + try { + RequestHeaderProto header = getRequestHeader(); + return header.getDeclaringClassProtocolName() + "." + + header.getMethodName(); + } catch (IOException e) { + throw new IllegalArgumentException(e); + } + } + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngineCallback.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngineCallback.java index 50b70ca4bec1a..f85adb17d3f8e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngineCallback.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngineCallback.java @@ -18,12 +18,17 @@ package org.apache.hadoop.ipc; -import org.apache.hadoop.thirdparty.protobuf.Message; +import com.google.protobuf.Message; +/** + * This engine uses Protobuf 2.5.0. Recommended to upgrade to Protobuf 3.x + * from hadoop-thirdparty and use ProtobufRpcEngineCallback2. + */ +@Deprecated public interface ProtobufRpcEngineCallback { - public void setResponse(Message message); + void setResponse(Message message); - public void error(Throwable t); + void error(Throwable t); } \ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngineCallback2.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngineCallback2.java new file mode 100644 index 0000000000000..e8c09f56282e6 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngineCallback2.java @@ -0,0 +1,29 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ipc; + +import org.apache.hadoop.thirdparty.protobuf.Message; + +public interface ProtobufRpcEngineCallback2 { + + public void setResponse(Message message); + + public void error(Throwable t); + +} \ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java index 4f95863b03db6..e794cb913c232 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java @@ -1043,7 +1043,7 @@ protected Server(String bindAddress, int port, private void initProtocolMetaInfo(Configuration conf) { RPC.setProtocolEngine(conf, ProtocolMetaInfoPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); ProtocolMetaInfoServerSideTranslatorPB xlator = new ProtocolMetaInfoServerSideTranslatorPB(this); BlockingService protocolInfoBlockingService = ProtocolInfoService @@ -1067,7 +1067,7 @@ public Server addProtocol(RpcKind rpcKind, Class protocolClass, @Override public Writable call(RPC.RpcKind rpcKind, String protocol, Writable rpcRequest, long receiveTime) throws Exception { - return getRpcInvoker(rpcKind).call(this, protocol, rpcRequest, + return getServerRpcInvoker(rpcKind).call(this, protocol, rpcRequest, receiveTime); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcClientUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcClientUtil.java index 84ecba1d34e9c..0ce78e54a43a0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcClientUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcClientUtil.java @@ -114,7 +114,7 @@ public static boolean isMethodSupported(Object rpcProxy, Class protocol, if (versionMap == null) { Configuration conf = new Configuration(); RPC.setProtocolEngine(conf, ProtocolMetaInfoPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); ProtocolMetaInfoPB protocolInfoProxy = getProtocolMetaInfoProxy(rpcProxy, conf); GetProtocolSignatureRequestProto.Builder builder = diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcScheduler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcScheduler.java index 63812f47f2db0..5202c6b356177 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcScheduler.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcScheduler.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ipc; -import java.util.concurrent.TimeUnit; +import org.apache.hadoop.ipc.metrics.RpcMetrics; /** * Implement this interface to be used for RPC scheduling and backoff. @@ -62,12 +62,12 @@ default void addResponseTime(String callName, Schedulable schedulable, // this interface, a default implementation is supplied which uses the old // method. All new implementations MUST override this interface and should // NOT use the other addResponseTime method. 
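The comment above describes an interface evolved by a default bridge: legacy schedulers that only implement the old (callName, priority, queueTime, processingTime) hook keep working, while the conversion below now uses RpcMetrics.TIMEUNIT instead of hard-coded milliseconds. The pattern in miniature (a sketch, not the Hadoop interface):

    interface ResponseTimeSink {
      /** Legacy hook that existing implementations already provide. */
      void addResponseTime(String call, int priority, int queueMs,
          int processingMs);

      /**
       * Newer hook; the default bridges to the legacy signature so old
       * implementations remain compatible. New implementations should
       * override this and ignore the legacy method.
       */
      default void addResponseTime(String call, int priority,
          long queueNanos, long processingNanos) {
        addResponseTime(call, priority,
            (int) (queueNanos / 1_000_000L), // ns -> ms for the old API
            (int) (processingNanos / 1_000_000L));
      }
    }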
- int queueTimeMs = (int) - details.get(ProcessingDetails.Timing.QUEUE, TimeUnit.MILLISECONDS); - int processingTimeMs = (int) - details.get(ProcessingDetails.Timing.PROCESSING, TimeUnit.MILLISECONDS); + int queueTime = (int) + details.get(ProcessingDetails.Timing.QUEUE, RpcMetrics.TIMEUNIT); + int processingTime = (int) + details.get(ProcessingDetails.Timing.PROCESSING, RpcMetrics.TIMEUNIT); addResponseTime(callName, schedulable.getPriorityLevel(), - queueTimeMs, processingTimeMs); + queueTime, processingTime); } void stop(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcWritable.java index 6604bd0cc1c68..f5f0d071f39ed 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcWritable.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcWritable.java @@ -42,6 +42,8 @@ static RpcWritable wrap(Object o) { return (RpcWritable)o; } else if (o instanceof Message) { return new ProtobufWrapper((Message)o); + } else if (o instanceof com.google.protobuf.Message) { + return new ProtobufWrapperLegacy((com.google.protobuf.Message) o); } else if (o instanceof Writable) { return new WritableWrapper((Writable)o); } @@ -132,6 +134,49 @@ T readFrom(ByteBuffer bb) throws IOException { } } + // adapter for Protobufs. + static class ProtobufWrapperLegacy extends RpcWritable { + private com.google.protobuf.Message message; + + ProtobufWrapperLegacy(com.google.protobuf.Message message) { + this.message = message; + } + + com.google.protobuf.Message getMessage() { + return message; + } + + @Override + void writeTo(ResponseBuffer out) throws IOException { + int length = message.getSerializedSize(); + length += com.google.protobuf.CodedOutputStream. + computeUInt32SizeNoTag(length); + out.ensureCapacity(length); + message.writeDelimitedTo(out); + } + + @SuppressWarnings("unchecked") + @Override + T readFrom(ByteBuffer bb) throws IOException { + // using the parser with a byte[]-backed coded input stream is the + // most efficient way to deserialize a protobuf. it has a direct + // path to the PB ctor that doesn't create multi-layered streams + // that internally buffer. + com.google.protobuf.CodedInputStream cis = + com.google.protobuf.CodedInputStream.newInstance( + bb.array(), bb.position() + bb.arrayOffset(), bb.remaining()); + try { + cis.pushLimit(cis.readRawVarint32()); + message = message.getParserForType().parseFrom(cis); + cis.checkLastTagWas(0); + } finally { + // advance over the bytes read. + bb.position(bb.position() + cis.getTotalBytesRead()); + } + return (T)message; + } + } + /** * adapter to allow decoding of writables and protobufs from a byte buffer. */ diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java index 4448164f4b137..907d55f9be347 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java @@ -304,7 +304,11 @@ public Class getRpcRequestWrapper( RpcKindMapValue val = rpcKindMap.get(ProtoUtil.convert(rpcKind)); return (val == null) ? 
null : val.rpcRequestWrapperClass; } - + + protected RpcInvoker getServerRpcInvoker(RPC.RpcKind rpcKind) { + return getRpcInvoker(rpcKind); + } + public static RpcInvoker getRpcInvoker(RPC.RpcKind rpcKind) { RpcKindMapValue val = rpcKindMap.get(rpcKind); return (val == null) ? null : val.rpcInvoker; @@ -2688,15 +2692,15 @@ private void processRpcRequest(RpcRequestHeaderProto header, call.setPriorityLevel(callQueue.getPriorityLevel(call)); call.markCallCoordinated(false); if(alignmentContext != null && call.rpcRequest != null && - (call.rpcRequest instanceof ProtobufRpcEngine.RpcProtobufRequest)) { + (call.rpcRequest instanceof ProtobufRpcEngine2.RpcProtobufRequest)) { // if call.rpcRequest is not RpcProtobufRequest, will skip the following // step and treat the call as uncoordinated. As currently only certain // ClientProtocol methods request made through RPC protobuf needs to be // coordinated. String methodName; String protoName; - ProtobufRpcEngine.RpcProtobufRequest req = - (ProtobufRpcEngine.RpcProtobufRequest) call.rpcRequest; + ProtobufRpcEngine2.RpcProtobufRequest req = + (ProtobufRpcEngine2.RpcProtobufRequest) call.rpcRequest; try { methodName = req.getRequestHeader().getMethodName(); protoName = req.getRequestHeader().getDeclaringClassProtocolName(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java index 624edc96b8ae7..cf4b4a9810c4f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java @@ -273,7 +273,11 @@ void registerSource(String name, String desc, MetricsSource source) { T register(final String name, final String description, final T sink) { LOG.debug(name +", "+ description); if (allSinks.containsKey(name)) { - LOG.warn("Sink "+ name +" already exists!"); + if(sinks.get(name) == null) { + registerSink(name, description, sink); + } else { + LOG.warn("Sink "+ name +" already exists!"); + } return sink; } allSinks.put(name, sink); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MBeans.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MBeans.java index 1b50498bbaf5a..4aef03a5e645f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MBeans.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MBeans.java @@ -18,7 +18,7 @@ package org.apache.hadoop.metrics2.util; import java.lang.management.ManagementFactory; -import java.util.HashMap; +import java.util.Collections; import java.util.Map; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -70,8 +70,7 @@ private MBeans() { */ static public ObjectName register(String serviceName, String nameName, Object theMbean) { - return register(serviceName, nameName, new HashMap(), - theMbean); + return register(serviceName, nameName, Collections.emptyMap(), theMbean); } /** diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/CachedDNSToSwitchMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/CachedDNSToSwitchMapping.java index af487ed5c61a5..803fcec8d6c77 100644 --- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/CachedDNSToSwitchMapping.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/CachedDNSToSwitchMapping.java @@ -130,8 +130,7 @@ public List resolve(List names) { */ @Override public Map getSwitchMap() { - Map switchMap = new HashMap(cache); - return switchMap; + return new HashMap<>(cache); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java index 9f52fed9678b9..893012befcf44 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java @@ -196,10 +196,8 @@ public List getDatanodesInRack(String loc) { loc = loc.substring(1); } InnerNode rack = (InnerNode) clusterMap.getLoc(loc); - if (rack == null) { - return null; - } - return new ArrayList(rack.getChildren()); + return (rack == null) ? new ArrayList<>(0) + : new ArrayList<>(rack.getChildren()); } finally { netlock.readLock().unlock(); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java index cd3514c4bce16..2beda8401f8d1 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java @@ -25,6 +25,7 @@ import java.nio.file.Files; import java.nio.file.Paths; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -130,7 +131,7 @@ public synchronized List resolve(List names) { if (map == null) { LOG.warn("Failed to read topology table. " + NetworkTopology.DEFAULT_RACK + " will be used for all nodes."); - map = new HashMap(); + map = Collections.emptyMap(); } } List results = new ArrayList(names.size()); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/CompositeGroupsMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/CompositeGroupsMapping.java index b762df2acc022..6f799c1542095 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/CompositeGroupsMapping.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/CompositeGroupsMapping.java @@ -19,6 +19,8 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.HashSet; +import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -69,8 +71,8 @@ public class CompositeGroupsMapping public synchronized List getGroups(String user) throws IOException { Set groupSet = new TreeSet(); - List groups = null; for (GroupMappingServiceProvider provider : providersList) { + List groups = Collections.emptyList(); try { groups = provider.getGroups(user); } catch (Exception e) { @@ -78,17 +80,15 @@ public synchronized List getGroups(String user) throws IOException { user, provider.getClass().getSimpleName(), e.toString()); LOG.debug("Stacktrace: ", e); } - if (groups != null && ! 
groups.isEmpty()) { + if (!groups.isEmpty()) { groupSet.addAll(groups); if (!combined) break; } } - List results = new ArrayList(groupSet.size()); - results.addAll(groupSet); - return results; + return new ArrayList<>(groupSet); } - + /** * Caches groups, no need to do that for this provider */ @@ -107,6 +107,29 @@ public void cacheGroupsAdd(List groups) throws IOException { // does nothing in this provider of user to groups mapping } + @Override + public synchronized Set getGroupsSet(String user) throws IOException { + Set groupSet = new HashSet(); + + Set groups = null; + for (GroupMappingServiceProvider provider : providersList) { + try { + groups = provider.getGroupsSet(user); + } catch (Exception e) { + LOG.warn("Unable to get groups for user {} via {} because: {}", + user, provider.getClass().getSimpleName(), e.toString()); + LOG.debug("Stacktrace: ", e); + } + if (groups != null && !groups.isEmpty()) { + groupSet.addAll(groups); + if (!combined) { + break; + } + } + } + return groupSet; + } + @Override public synchronized Configuration getConf() { return conf; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/GroupMappingServiceProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/GroupMappingServiceProvider.java index 8b90f5bc7af9e..ff6c86d5febf3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/GroupMappingServiceProvider.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/GroupMappingServiceProvider.java @@ -19,6 +19,7 @@ import java.io.IOException; import java.util.List; +import java.util.Set; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -52,4 +53,13 @@ public interface GroupMappingServiceProvider { * @throws IOException */ public void cacheGroupsAdd(List groups) throws IOException; + + /** + * Get all various group memberships of a given user. 
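A provider that already produces an ordered array or list can meet this new contract cheaply, as JniBasedUnixGroupsMapping does further down with a LinkedHashSet. A reduced sketch (the native lookup is faked here):

    import java.io.IOException;
    import java.util.Arrays;
    import java.util.LinkedHashSet;
    import java.util.List;
    import java.util.Set;

    public class ArrayBackedGroupsMapping {
      /** Stand-in for the JNI call getGroupsForUser(user). */
      private String[] lookup(String user) {
        return new String[] {"users", "staff", "users"};
      }

      public List<String> getGroups(String user) throws IOException {
        return Arrays.asList(lookup(user));
      }

      /** De-duplicates while keeping the native enumeration order. */
      public Set<String> getGroupsSet(String user) throws IOException {
        return new LinkedHashSet<>(Arrays.asList(lookup(user)));
      }
    }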
+ * Returns EMPTY set in case of non-existing user + * @param user User's name + * @return set of group memberships of user + * @throws IOException + */ + Set getGroupsSet(String user) throws IOException; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java index b29278bd20751..961ec7d591924 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java @@ -26,7 +26,6 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ThreadFactory; @@ -78,8 +77,8 @@ public class Groups { private final GroupMappingServiceProvider impl; - private final LoadingCache> cache; - private final AtomicReference>> staticMapRef = + private final LoadingCache> cache; + private final AtomicReference>> staticMapRef = new AtomicReference<>(); private final long cacheTimeout; private final long negativeCacheTimeout; @@ -168,8 +167,7 @@ private void parseStaticMapping(Configuration conf) { CommonConfigurationKeys.HADOOP_USER_GROUP_STATIC_OVERRIDES_DEFAULT); Collection mappings = StringUtils.getStringCollection( staticMapping, ";"); - Map> staticUserToGroupsMap = - new HashMap>(); + Map> staticUserToGroupsMap = new HashMap<>(); for (String users : mappings) { Collection userToGroups = StringUtils.getStringCollection(users, "="); @@ -181,10 +179,10 @@ private void parseStaticMapping(Configuration conf) { String[] userToGroupsArray = userToGroups.toArray(new String[userToGroups .size()]); String user = userToGroupsArray[0]; - List groups = Collections.emptyList(); + Set groups = Collections.emptySet(); if (userToGroupsArray.length == 2) { - groups = (List) StringUtils - .getStringCollection(userToGroupsArray[1]); + groups = new LinkedHashSet(StringUtils + .getStringCollection(userToGroupsArray[1])); } staticUserToGroupsMap.put(user, groups); } @@ -203,15 +201,47 @@ private IOException noGroupsForUser(String user) { /** * Get the group memberships of a given user. * If the user's group is not cached, this method may block. + * Note this method can be expensive as it involves Set->List conversion. + * For user with large group membership (i.e., > 1000 groups), we recommend + * using getGroupSet to avoid the conversion and fast membership look up via + * contains(). * @param user User's name - * @return the group memberships of the user + * @return the group memberships of the user as list * @throws IOException if user does not exist + * @deprecated Use {@link #getGroupsSet(String user)} instead. */ + @Deprecated public List getGroups(final String user) throws IOException { + return Collections.unmodifiableList(new ArrayList<>( + getGroupInternal(user))); + } + + /** + * Get the group memberships of a given user. + * If the user's group is not cached, this method may block. 
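For callers, the payoff of the Set-returning lookup described here is a fast contains() test and no defensive copy. A usage sketch ("admins" and the user name are illustrative):

    import java.io.IOException;
    import java.util.Set;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.Groups;

    public class MembershipCheck {
      public static boolean isAdmin(String user) throws IOException {
        Groups groups =
            Groups.getUserToGroupsMappingService(new Configuration());
        // One hash lookup against the cached set; the deprecated
        // getGroups() would copy the set into a list first and then
        // scan it linearly for contains().
        Set<String> memberships = groups.getGroupsSet(user);
        return memberships.contains("admins");
      }
    }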
+ * This provide better performance when user has large group membership via + * 1) avoid set->list->set conversion for the caller UGI/PermissionCheck + * 2) fast lookup using contains() via Set instead of List + * @param user User's name + * @return the group memberships of the user as set + * @throws IOException if user does not exist + */ + public Set getGroupsSet(final String user) throws IOException { + return Collections.unmodifiableSet(getGroupInternal(user)); + } + + /** + * Get the group memberships of a given user. + * If the user's group is not cached, this method may block. + * @param user User's name + * @return the group memberships of the user as Set + * @throws IOException if user does not exist + */ + private Set getGroupInternal(final String user) throws IOException { // No need to lookup for groups of static users - Map> staticUserToGroupsMap = staticMapRef.get(); + Map> staticUserToGroupsMap = staticMapRef.get(); if (staticUserToGroupsMap != null) { - List staticMapping = staticUserToGroupsMap.get(user); + Set staticMapping = staticUserToGroupsMap.get(user); if (staticMapping != null) { return staticMapping; } @@ -267,7 +297,7 @@ public long read() { /** * Deals with loading data into the cache. */ - private class GroupCacheLoader extends CacheLoader> { + private class GroupCacheLoader extends CacheLoader> { private ListeningExecutorService executorService; @@ -308,7 +338,7 @@ private class GroupCacheLoader extends CacheLoader> { * @throws IOException to prevent caching negative entries */ @Override - public List load(String user) throws Exception { + public Set load(String user) throws Exception { LOG.debug("GroupCacheLoader - load."); TraceScope scope = null; Tracer tracer = Tracer.curThreadTracer(); @@ -316,9 +346,9 @@ public List load(String user) throws Exception { scope = tracer.newScope("Groups#fetchGroupList"); scope.addKVAnnotation("user", user); } - List groups = null; + Set groups = null; try { - groups = fetchGroupList(user); + groups = fetchGroupSet(user); } finally { if (scope != null) { scope.close(); @@ -334,9 +364,7 @@ public List load(String user) throws Exception { throw noGroupsForUser(user); } - // return immutable de-duped list - return Collections.unmodifiableList( - new ArrayList<>(new LinkedHashSet<>(groups))); + return groups; } /** @@ -345,8 +373,8 @@ public List load(String user) throws Exception { * implementation, otherwise is arranges for the cache to be updated later */ @Override - public ListenableFuture> reload(final String key, - List oldValue) + public ListenableFuture> reload(final String key, + Set oldValue) throws Exception { LOG.debug("GroupCacheLoader - reload (async)."); if (!reloadGroupsInBackground) { @@ -354,19 +382,16 @@ public ListenableFuture> reload(final String key, } backgroundRefreshQueued.incrementAndGet(); - ListenableFuture> listenableFuture = - executorService.submit(new Callable>() { - @Override - public List call() throws Exception { - backgroundRefreshQueued.decrementAndGet(); - backgroundRefreshRunning.incrementAndGet(); - List results = load(key); - return results; - } + ListenableFuture> listenableFuture = + executorService.submit(() -> { + backgroundRefreshQueued.decrementAndGet(); + backgroundRefreshRunning.incrementAndGet(); + Set results = load(key); + return results; }); - Futures.addCallback(listenableFuture, new FutureCallback>() { + Futures.addCallback(listenableFuture, new FutureCallback>() { @Override - public void onSuccess(List result) { + public void onSuccess(Set result) { 
backgroundRefreshSuccess.incrementAndGet(); backgroundRefreshRunning.decrementAndGet(); } @@ -380,11 +405,12 @@ public void onFailure(Throwable t) { } /** - * Queries impl for groups belonging to the user. This could involve I/O and take awhile. + * Queries impl for groups belonging to the user. + * This could involve I/O and take awhile. */ - private List fetchGroupList(String user) throws IOException { + private Set fetchGroupSet(String user) throws IOException { long startMs = timer.monotonicNow(); - List groupList = impl.getGroups(user); + Set groups = impl.getGroupsSet(user); long endMs = timer.monotonicNow(); long deltaMs = endMs - startMs ; UserGroupInformation.metrics.addGetGroups(deltaMs); @@ -392,8 +418,7 @@ private List fetchGroupList(String user) throws IOException { LOG.warn("Potential performance problem: getGroups(user=" + user +") " + "took " + deltaMs + " milliseconds."); } - - return groupList; + return groups; } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMapping.java index a0f6142a3c5c7..6c24427f3e50e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMapping.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMapping.java @@ -20,8 +20,11 @@ import java.io.IOException; import java.util.Arrays; +import java.util.LinkedHashSet; import java.util.List; +import java.util.Set; +import org.apache.commons.collections.CollectionUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -75,6 +78,18 @@ static private void logError(int groupId, String error) { @Override public List getGroups(String user) throws IOException { + return Arrays.asList(getGroupsInternal(user)); + } + + @Override + public Set getGroupsSet(String user) throws IOException { + String[] groups = getGroupsInternal(user); + Set result = new LinkedHashSet(groups.length); + CollectionUtils.addAll(result, groups); + return result; + } + + private String[] getGroupsInternal(String user) throws IOException { String[] groups = new String[0]; try { groups = getGroupsForUser(user); @@ -85,7 +100,7 @@ public List getGroups(String user) throws IOException { LOG.info("Error getting groups for " + user + ": " + e.getMessage()); } } - return Arrays.asList(groups); + return groups; } @Override diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMappingWithFallback.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMappingWithFallback.java index f1644305d917e..cc47df1462678 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMappingWithFallback.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMappingWithFallback.java @@ -20,6 +20,7 @@ import java.io.IOException; import java.util.List; +import java.util.Set; import org.apache.hadoop.util.NativeCodeLoader; import org.apache.hadoop.util.PerformanceAdvisory; @@ -61,4 +62,9 @@ public void cacheGroupsAdd(List groups) throws IOException { impl.cacheGroupsAdd(groups); } + @Override + public Set getGroupsSet(String user) throws IOException { + return impl.getGroupsSet(user); + } + } diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.java index 9ba55e436f3f8..65bd1c00333a9 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.java @@ -20,6 +20,7 @@ import java.io.IOException; import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.LinkedList; @@ -125,6 +126,6 @@ protected synchronized List getUsersForNetgroup(String netgroup) { if (users != null && users.length != 0) { return Arrays.asList(users); } - return new LinkedList(); + return Collections.emptyList(); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMappingWithFallback.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMappingWithFallback.java index fcc47cb796f33..3d4bd588a5344 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMappingWithFallback.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMappingWithFallback.java @@ -20,6 +20,7 @@ import java.io.IOException; import java.util.List; +import java.util.Set; import org.apache.hadoop.util.NativeCodeLoader; import org.slf4j.Logger; @@ -60,4 +61,9 @@ public void cacheGroupsAdd(List groups) throws IOException { impl.cacheGroupsAdd(groups); } + @Override + public Set getGroupsSet(String user) throws IOException { + return impl.getGroupsSet(user); + } + } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java index 8e71f69c858d1..3f656990517af 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java @@ -33,6 +33,7 @@ import java.util.Collections; import java.util.Hashtable; import java.util.Iterator; +import java.util.LinkedHashSet; import java.util.List; import java.util.HashSet; import java.util.Collection; @@ -302,12 +303,12 @@ public class LdapGroupsMapping } private DirContext ctx; - private Configuration conf; + private volatile Configuration conf; - private Iterator ldapUrls; + private volatile Iterator ldapUrls; private String currentLdapUrl; - private boolean useSsl; + private volatile boolean useSsl; private String keystore; private String keystorePass; private String truststore; @@ -320,21 +321,21 @@ public class LdapGroupsMapping private Iterator bindUsers; private BindUserInfo currentBindUser; - private String userbaseDN; + private volatile String userbaseDN; private String groupbaseDN; private String groupSearchFilter; - private String userSearchFilter; - private String memberOfAttr; + private volatile String userSearchFilter; + private volatile String memberOfAttr; private String groupMemberAttr; - private String groupNameAttr; - private int groupHierarchyLevels; - private String posixUidAttr; - private String posixGidAttr; + private volatile String groupNameAttr; + private 
volatile int groupHierarchyLevels; + private volatile String posixUidAttr; + private volatile String posixGidAttr; private boolean isPosix; - private boolean useOneQuery; + private volatile boolean useOneQuery; private int numAttempts; - private int numAttemptsBeforeFailover; - private String ldapCtxFactoryClassName; + private volatile int numAttemptsBeforeFailover; + private volatile String ldapCtxFactoryClassName; /** * Returns list of groups for a user. @@ -348,38 +349,7 @@ public class LdapGroupsMapping */ @Override public synchronized List getGroups(String user) { - /* - * Normal garbage collection takes care of removing Context instances when - * they are no longer in use. Connections used by Context instances being - * garbage collected will be closed automatically. So in case connection is - * closed and gets CommunicationException, retry some times with new new - * DirContext/connection. - */ - - // Tracks the number of attempts made using the same LDAP server - int atemptsBeforeFailover = 1; - - for (int attempt = 1; attempt <= numAttempts; attempt++, - atemptsBeforeFailover++) { - try { - return doGetGroups(user, groupHierarchyLevels); - } catch (AuthenticationException e) { - switchBindUser(e); - } catch (NamingException e) { - LOG.warn("Failed to get groups for user {} (attempt={}/{}) using {}. " + - "Exception: ", user, attempt, numAttempts, currentLdapUrl, e); - LOG.trace("TRACE", e); - - if (failover(atemptsBeforeFailover, numAttemptsBeforeFailover)) { - atemptsBeforeFailover = 0; - } - } - - // Reset ctx so that new DirContext can be created with new connection - this.ctx = null; - } - - return Collections.emptyList(); + return new ArrayList<>(getGroupsSet(user)); } /** @@ -458,10 +428,10 @@ private NamingEnumeration lookupPosixGroup(SearchResult result, * @return a list of strings representing group names of the user. * @throws NamingException if unable to find group names */ - private List lookupGroup(SearchResult result, DirContext c, + private Set lookupGroup(SearchResult result, DirContext c, int goUpHierarchy) throws NamingException { - List groups = new ArrayList<>(); + Set groups = new LinkedHashSet<>(); Set groupDNs = new HashSet<>(); NamingEnumeration groupResults; @@ -484,11 +454,7 @@ private List lookupGroup(SearchResult result, DirContext c, getGroupNames(groupResult, groups, groupDNs, goUpHierarchy > 0); } if (goUpHierarchy > 0 && !isPosix) { - // convert groups to a set to ensure uniqueness - Set groupset = new HashSet<>(groups); - goUpGroupHierarchy(groupDNs, goUpHierarchy, groupset); - // convert set back to list for compatibility - groups = new ArrayList<>(groupset); + goUpGroupHierarchy(groupDNs, goUpHierarchy, groups); } } return groups; @@ -507,7 +473,7 @@ private List lookupGroup(SearchResult result, DirContext c, * return an empty string array. * @throws NamingException if unable to get group names */ - List doGetGroups(String user, int goUpHierarchy) + Set doGetGroups(String user, int goUpHierarchy) throws NamingException { DirContext c = getDirContext(); @@ -518,11 +484,11 @@ List doGetGroups(String user, int goUpHierarchy) if (!results.hasMoreElements()) { LOG.debug("doGetGroups({}) returned no groups because the " + "user is not found.", user); - return new ArrayList<>(); + return Collections.emptySet(); } SearchResult result = results.nextElement(); - List groups = null; + Set groups = Collections.emptySet(); if (useOneQuery) { try { /** @@ -536,7 +502,7 @@ List doGetGroups(String user, int goUpHierarchy) memberOfAttr + "' attribute." 
+ "Returned user object: " + result.toString()); } - groups = new ArrayList<>(); + groups = new LinkedHashSet<>(); NamingEnumeration groupEnumeration = groupDNAttr.getAll(); while (groupEnumeration.hasMore()) { String groupDN = groupEnumeration.next().toString(); @@ -548,7 +514,7 @@ List doGetGroups(String user, int goUpHierarchy) "the second LDAP query using the user's DN.", e); } } - if (groups == null || groups.isEmpty() || goUpHierarchy > 0) { + if (groups.isEmpty() || goUpHierarchy > 0) { groups = lookupGroup(result, c, goUpHierarchy); } LOG.debug("doGetGroups({}) returned {}", user, groups); @@ -700,6 +666,42 @@ public void cacheGroupsAdd(List groups) { // does nothing in this provider of user to groups mapping } + @Override + public Set getGroupsSet(String user) { + /* + * Normal garbage collection takes care of removing Context instances when + * they are no longer in use. Connections used by Context instances being + * garbage collected will be closed automatically. So in case connection is + * closed and gets CommunicationException, retry some times with new new + * DirContext/connection. + */ + + // Tracks the number of attempts made using the same LDAP server + int atemptsBeforeFailover = 1; + + for (int attempt = 1; attempt <= numAttempts; attempt++, + atemptsBeforeFailover++) { + try { + return doGetGroups(user, groupHierarchyLevels); + } catch (AuthenticationException e) { + switchBindUser(e); + } catch (NamingException e) { + LOG.warn("Failed to get groups for user {} (attempt={}/{}) using {}. " + + "Exception: ", user, attempt, numAttempts, currentLdapUrl, e); + LOG.trace("TRACE", e); + + if (failover(atemptsBeforeFailover, numAttemptsBeforeFailover)) { + atemptsBeforeFailover = 0; + } + } + + // Reset ctx so that new DirContext can be created with new connection + this.ctx = null; + } + + return Collections.emptySet(); + } + @Override public synchronized Configuration getConf() { return conf; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/NetgroupCache.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/NetgroupCache.java index 4495a66c4322f..aa06c59a64814 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/NetgroupCache.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/NetgroupCache.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.security; +import java.util.ArrayList; import java.util.Collections; import java.util.HashSet; -import java.util.LinkedList; import java.util.List; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; @@ -61,7 +61,7 @@ public static void getNetgroups(final String user, * @return list of cached groups */ public static List getNetgroupNames() { - return new LinkedList(getGroups()); + return new ArrayList<>(getGroups()); } private static Set getGroups() { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/NullGroupsMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/NullGroupsMapping.java index f3d048daf990a..9592ecc32c012 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/NullGroupsMapping.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/NullGroupsMapping.java @@ -15,8 +15,10 @@ */ package org.apache.hadoop.security; +import java.io.IOException; import java.util.Collections; import java.util.List; +import java.util.Set; /** * 
This class provides groups mapping for {@link UserGroupInformation} when the @@ -31,6 +33,19 @@ public class NullGroupsMapping implements GroupMappingServiceProvider { public void cacheGroupsAdd(List groups) { } + /** + * Get all various group memberships of a given user. + * Returns EMPTY set in case of non-existing user + * + * @param user User's name + * @return set of group memberships of user + * @throws IOException + */ + @Override + public Set getGroupsSet(String user) throws IOException { + return Collections.emptySet(); + } + /** * Returns an empty list. * @param user ignored diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/RuleBasedLdapGroupsMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/RuleBasedLdapGroupsMapping.java index 6accf2fdced02..5fadcc3ced58b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/RuleBasedLdapGroupsMapping.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/RuleBasedLdapGroupsMapping.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.security; -import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; @@ -25,7 +24,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.LinkedHashSet; import java.util.List; +import java.util.Set; import java.util.stream.Collectors; /** @@ -88,4 +89,18 @@ public synchronized List getGroups(String user) { } } + public synchronized Set getGroupsSet(String user) { + Set groups = super.getGroupsSet(user); + switch (rule) { + case TO_UPPER: + return groups.stream().map(StringUtils::toUpperCase).collect( + Collectors.toCollection(LinkedHashSet::new)); + case TO_LOWER: + return groups.stream().map(StringUtils::toLowerCase).collect( + Collectors.toCollection(LinkedHashSet::new)); + case NONE: + default: + return groups; + } + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedIdMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedIdMapping.java index 92ea83d8f1da5..93231075282da 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedIdMapping.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedIdMapping.java @@ -24,6 +24,7 @@ import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; import java.nio.file.Files; +import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.regex.Matcher; @@ -534,7 +535,7 @@ synchronized private void updateMapIncr(final int id, static final class PassThroughMap extends HashMap { public PassThroughMap() { - this(new HashMap()); + this(Collections.emptyMap()); } public PassThroughMap(Map mapping) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java index 31f43980552f2..f3432a6f91762 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java @@ -18,8 +18,11 @@ package 
org.apache.hadoop.security; import java.io.IOException; -import java.util.LinkedList; +import java.util.ArrayList; +import java.util.Collections; +import java.util.LinkedHashSet; import java.util.List; +import java.util.Set; import java.util.StringTokenizer; import java.util.concurrent.TimeUnit; @@ -53,7 +56,7 @@ public class ShellBasedUnixGroupsMapping extends Configured private long timeout = CommonConfigurationKeys. HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_DEFAULT; - private static final List EMPTY_GROUPS = new LinkedList<>(); + private static final Set EMPTY_GROUPS_SET = Collections.emptySet(); @Override public void setConf(Configuration conf) { @@ -94,7 +97,7 @@ public String toString() { */ @Override public List getGroups(String userName) throws IOException { - return getUnixGroups(userName); + return new ArrayList(getUnixGroups(userName)); } /** @@ -115,6 +118,11 @@ public void cacheGroupsAdd(List groups) throws IOException { // does nothing in this provider of user to groups mapping } + @Override + public Set getGroupsSet(String userName) throws IOException { + return getUnixGroups(userName); + } + /** * Create a ShellCommandExecutor object using the user's name. * @@ -192,44 +200,33 @@ private boolean handleExecutorTimeout( * group is returned first. * @throws IOException if encounter any error when running the command */ - private List getUnixGroups(String user) throws IOException { + private Set getUnixGroups(String user) throws IOException { ShellCommandExecutor executor = createGroupExecutor(user); - List groups; + Set groups; try { executor.execute(); groups = resolveFullGroupNames(executor.getOutput()); } catch (ExitCodeException e) { if (handleExecutorTimeout(executor, user)) { - return EMPTY_GROUPS; + return EMPTY_GROUPS_SET; } else { try { groups = resolvePartialGroupNames(user, e.getMessage(), executor.getOutput()); } catch (PartialGroupNameException pge) { LOG.warn("unable to return groups for user {}", user, pge); - return EMPTY_GROUPS; + return EMPTY_GROUPS_SET; } } } catch (IOException ioe) { if (handleExecutorTimeout(executor, user)) { - return EMPTY_GROUPS; + return EMPTY_GROUPS_SET; } else { // If its not an executor timeout, we should let the caller handle it throw ioe; } } - - // remove duplicated primary group - if (!Shell.WINDOWS) { - for (int i = 1; i < groups.size(); i++) { - if (groups.get(i).equals(groups.get(0))) { - groups.remove(i); - break; - } - } - } - return groups; } @@ -242,13 +239,13 @@ private List getUnixGroups(String user) throws IOException { * @return a linked list of group names * @throws PartialGroupNameException */ - private List parsePartialGroupNames(String groupNames, + private Set parsePartialGroupNames(String groupNames, String groupIDs) throws PartialGroupNameException { StringTokenizer nameTokenizer = new StringTokenizer(groupNames, Shell.TOKEN_SEPARATOR_REGEX); StringTokenizer idTokenizer = new StringTokenizer(groupIDs, Shell.TOKEN_SEPARATOR_REGEX); - List groups = new LinkedList(); + Set groups = new LinkedHashSet<>(); while (nameTokenizer.hasMoreTokens()) { // check for unresolvable group names. 
if (!idTokenizer.hasMoreTokens()) { @@ -277,10 +274,10 @@ private List parsePartialGroupNames(String groupNames, * @param userName the user's name * @param errMessage error message from the shell command * @param groupNames the incomplete list of group names - * @return a list of resolved group names + * @return a set of resolved group names * @throws PartialGroupNameException if the resolution fails or times out */ - private List resolvePartialGroupNames(String userName, + private Set resolvePartialGroupNames(String userName, String errMessage, String groupNames) throws PartialGroupNameException { // Exception may indicate that some group names are not resolvable. // Shell-based implementation should tolerate unresolvable groups names, @@ -322,16 +319,16 @@ private List resolvePartialGroupNames(String userName, } /** - * Split group names into a linked list. + * Split group names into a set. * * @param groupNames a string representing the user's group names - * @return a linked list of group names + * @return a set of group names */ @VisibleForTesting - protected List resolveFullGroupNames(String groupNames) { + protected Set resolveFullGroupNames(String groupNames) { StringTokenizer tokenizer = new StringTokenizer(groupNames, Shell.TOKEN_SEPARATOR_REGEX); - List groups = new LinkedList(); + Set groups = new LinkedHashSet<>(); while (tokenizer.hasMoreTokens()) { groups.add(tokenizer.nextToken()); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java index 8c84a8d31a063..5269e5a33061a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java @@ -40,7 +40,6 @@ import java.security.PrivilegedActionException; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.EnumMap; @@ -1483,8 +1482,8 @@ public UserGroupInformation getRealUser() { * map that has the translation of usernames to groups. 
*/ private static class TestingGroups extends Groups { - private final Map> userToGroupsMapping = - new HashMap>(); + private final Map> userToGroupsMapping = + new HashMap<>(); private Groups underlyingImplementation; private TestingGroups(Groups underlyingImplementation) { @@ -1494,17 +1493,22 @@ private TestingGroups(Groups underlyingImplementation) { @Override public List getGroups(String user) throws IOException { - List result = userToGroupsMapping.get(user); - + return new ArrayList<>(getGroupsSet(user)); + } + + @Override + public Set getGroupsSet(String user) throws IOException { + Set result = userToGroupsMapping.get(user); if (result == null) { - result = underlyingImplementation.getGroups(user); + result = underlyingImplementation.getGroupsSet(user); } - return result; } private void setUserGroups(String user, String[] groups) { - userToGroupsMapping.put(user, Arrays.asList(groups)); + Set groupsSet = new LinkedHashSet<>(); + Collections.addAll(groupsSet, groups); + userToGroupsMapping.put(user, groupsSet); } } @@ -1563,11 +1567,11 @@ public String getShortUserName() { } public String getPrimaryGroupName() throws IOException { - List groups = getGroups(); - if (groups.isEmpty()) { + Set groupsSet = getGroupsSet(); + if (groupsSet.isEmpty()) { throw new IOException("There is no primary group for UGI " + this); } - return groups.get(0); + return groupsSet.iterator().next(); } /** @@ -1680,21 +1684,24 @@ private synchronized Credentials getCredentialsInternal() { } /** - * Get the group names for this user. {@link #getGroups()} is less + * Get the group names for this user. {@link #getGroupsSet()} is less * expensive alternative when checking for a contained element. * @return the list of users with the primary group first. If the command * fails, it returns an empty list. */ public String[] getGroupNames() { - List groups = getGroups(); - return groups.toArray(new String[groups.size()]); + Collection groupsSet = getGroupsSet(); + return groupsSet.toArray(new String[groupsSet.size()]); } /** - * Get the group names for this user. + * Get the group names for this user. {@link #getGroupsSet()} is less + * expensive alternative when checking for a contained element. * @return the list of users with the primary group first. If the command * fails, it returns an empty list. + * @deprecated Use {@link #getGroupsSet()} instead. */ + @Deprecated public List getGroups() { ensureInitialized(); try { @@ -1705,6 +1712,21 @@ public List getGroups() { } } + /** + * Get the groups names for the user as a Set. + * @return the set of users with the primary group first. If the command + * fails, it returns an empty set. + */ + public Set getGroupsSet() { + ensureInitialized(); + try { + return groups.getGroupsSet(getShortUserName()); + } catch (IOException ie) { + LOG.debug("Failed to get groups for user {}", getShortUserName(), ie); + return Collections.emptySet(); + } + } + /** * Return the username. 
*/ diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/AccessControlList.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/AccessControlList.java index 8af47d6e9d5e9..e86d918b05504 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/AccessControlList.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/AccessControlList.java @@ -24,6 +24,7 @@ import java.util.HashSet; import java.util.LinkedList; import java.util.List; +import java.util.Set; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -231,8 +232,9 @@ public final boolean isUserInList(UserGroupInformation ugi) { if (allAllowed || users.contains(ugi.getShortUserName())) { return true; } else if (!groups.isEmpty()) { - for (String group : ugi.getGroups()) { - if (groups.contains(group)) { + Set ugiGroups = ugi.getGroupsSet(); + for (String group : groups) { + if (ugiGroups.contains(group)) { return true; } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java index f329accec7553..eb65799757f66 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java @@ -22,18 +22,23 @@ import java.io.DataInputStream; import java.io.IOException; import java.security.MessageDigest; +import java.util.ArrayList; import java.util.Collection; -import java.util.HashMap; +import java.util.Collections; import java.util.HashSet; import java.util.Iterator; +import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; import javax.crypto.SecretKey; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.io.Text; +import org.apache.hadoop.metrics2.util.Metrics2Util.NameValuePair; +import org.apache.hadoop.metrics2.util.Metrics2Util.TopN; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.HadoopKerberosName; import org.apache.hadoop.security.token.SecretManager; @@ -63,8 +68,14 @@ private String formatTokenId(TokenIdent id) { * to DelegationTokenInformation. Protected by this object lock. */ protected final Map currentTokens - = new HashMap(); - + = new ConcurrentHashMap<>(); + + /** + * Map of token real owners to its token count. This is used to generate + * metrics of top users by owned tokens. + */ + protected final Map tokenOwnerStats = new ConcurrentHashMap<>(); + /** * Sequence number to create DelegationTokenIdentifier. * Protected by this object lock. @@ -75,7 +86,7 @@ private String formatTokenId(TokenIdent id) { * Access to allKeys is protected by this object lock */ protected final Map allKeys - = new HashMap(); + = new ConcurrentHashMap<>(); /** * Access to currentId is protected by this object lock. 
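As an aside on the `tokenOwnerStats` map introduced above: the patch tallies tokens per real owner in a `ConcurrentHashMap`, with updates made under the secret manager's own locking. If such a tally were ever updated outside that lock, the map's atomic per-key operations avoid lost updates. A minimal sketch, assuming nothing beyond `java.util.concurrent` (the `OwnerTally` class and method names are illustrative, not part of this patch):

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

/** Illustrative only: an owner-to-count tally whose updates are atomic per key. */
class OwnerTally {
  private final Map<String, Long> counts = new ConcurrentHashMap<>();

  void increment(String owner) {
    // merge() folds the read-modify-write into one atomic step,
    // unlike a separate getOrDefault() followed by put().
    counts.merge(owner, 1L, Long::sum);
  }

  void decrement(String owner) {
    // compute() atomically decrements, removing the entry once it reaches zero.
    counts.compute(owner, (k, v) -> (v == null || v <= 1) ? null : v - 1);
  }
}
```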
@@ -292,6 +303,7 @@ protected DelegationTokenInformation getTokenInfo(TokenIdent ident) { protected void storeToken(TokenIdent ident, DelegationTokenInformation tokenInfo) throws IOException { currentTokens.put(ident, tokenInfo); + addTokenForOwnerStats(ident); storeNewToken(ident, tokenInfo.getRenewDate()); } @@ -339,6 +351,7 @@ public synchronized void addPersistedDelegationToken( if (getTokenInfo(identifier) == null) { currentTokens.put(identifier, new DelegationTokenInformation(renewDate, password, getTrackingIdIfEnabled(identifier))); + addTokenForOwnerStats(identifier); } else { throw new IOException("Same delegation token being added twice: " + formatTokenId(identifier)); @@ -578,6 +591,7 @@ public synchronized TokenIdent cancelToken(Token token, if (info == null) { throw new InvalidToken("Token not found " + formatTokenId(id)); } + removeTokenForOwnerStats(id); removeStoredToken(id); return id; } @@ -634,6 +648,7 @@ private void removeExpiredToken() throws IOException { long renewDate = entry.getValue().getRenewDate(); if (renewDate < now) { expiredTokens.add(entry.getKey()); + removeTokenForOwnerStats(entry.getKey()); i.remove(); } } @@ -726,4 +741,88 @@ public TokenIdent decodeTokenIdentifier(Token token) throws IOExcept return token.decodeIdentifier(); } + /** + * Return top token real owners list as well as the tokens count. + * + * @param n top number of users + * @return map of owners to counts + */ + public List getTopTokenRealOwners(int n) { + n = Math.min(n, tokenOwnerStats.size()); + if (n == 0) { + return new ArrayList<>(); + } + + TopN topN = new TopN(n); + for (Map.Entry entry : tokenOwnerStats.entrySet()) { + topN.offer(new NameValuePair( + entry.getKey(), entry.getValue())); + } + + List list = new ArrayList<>(); + while (!topN.isEmpty()) { + list.add(topN.poll()); + } + Collections.reverse(list); + return list; + } + + /** + * Return the real owner for a token. If this is a token from a proxy user, + * the real/effective user will be returned. + * + * @param id + * @return real owner + */ + private String getTokenRealOwner(TokenIdent id) { + String realUser; + if (id.getRealUser() != null && !id.getRealUser().toString().isEmpty()) { + realUser = id.getRealUser().toString(); + } else { + // if there is no real user -> this is a non proxy user + // the user itself is the real owner + realUser = id.getUser().getUserName(); + } + return realUser; + } + + /** + * Add token stats to the owner to token count mapping. + * + * @param id + */ + private void addTokenForOwnerStats(TokenIdent id) { + String realOwner = getTokenRealOwner(id); + tokenOwnerStats.put(realOwner, + tokenOwnerStats.getOrDefault(realOwner, 0L)+1); + } + + /** + * Remove token stats to the owner to token count mapping. + * + * @param id + */ + private void removeTokenForOwnerStats(TokenIdent id) { + String realOwner = getTokenRealOwner(id); + if (tokenOwnerStats.containsKey(realOwner)) { + // unlikely to be less than 1 but in case + if (tokenOwnerStats.get(realOwner) <= 1) { + tokenOwnerStats.remove(realOwner); + } else { + tokenOwnerStats.put(realOwner, tokenOwnerStats.get(realOwner)-1); + } + } + } + + /** + * This method syncs token information from currentTokens to tokenOwnerStats. + * It is used when the currentTokens is initialized or refreshed. This is + * called from a single thread thus no synchronization is needed. 
+ */ + protected void syncTokenOwnerStats() { + tokenOwnerStats.clear(); + for (TokenIdent id : currentTokens.keySet()) { + addTokenForOwnerStats(id); + } + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java index cd3b8c0c0f279..276573ba00c9b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java @@ -55,6 +55,7 @@ import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.delegation.web.DelegationTokenManager; +import static org.apache.hadoop.util.Time.now; import org.apache.zookeeper.CreateMode; import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.KeeperException.NoNodeException; @@ -79,7 +80,7 @@ public abstract class ZKDelegationTokenSecretManager extends AbstractDelegationTokenSecretManager { - private static final String ZK_CONF_PREFIX = "zk-dt-secret-manager."; + public static final String ZK_CONF_PREFIX = "zk-dt-secret-manager."; public static final String ZK_DTSM_ZK_NUM_RETRIES = ZK_CONF_PREFIX + "zkNumRetries"; public static final String ZK_DTSM_ZK_SESSION_TIMEOUT = ZK_CONF_PREFIX @@ -100,6 +101,9 @@ public abstract class ZKDelegationTokenSecretManager 0) { LOG.warn("Ignored {} nodes while loading {} cache.", count, cacheName); } @@ -457,9 +471,7 @@ private void processKeyAddOrUpdate(byte[] data) throws IOException { DataInputStream din = new DataInputStream(bin); DelegationKey key = new DelegationKey(); key.readFields(din); - synchronized (this) { - allKeys.put(key.getKeyId(), key); - } + allKeys.put(key.getKeyId(), key); } private void processKeyRemoved(String path) { @@ -469,15 +481,13 @@ private void processKeyRemoved(String path) { int j = tokSeg.indexOf('_'); if (j > 0) { int keyId = Integer.parseInt(tokSeg.substring(j + 1)); - synchronized (this) { - allKeys.remove(keyId); - } + allKeys.remove(keyId); } } } - private void processTokenAddOrUpdate(ChildData data) throws IOException { - ByteArrayInputStream bin = new ByteArrayInputStream(data.getData()); + protected TokenIdent processTokenAddOrUpdate(byte[] data) throws IOException { + ByteArrayInputStream bin = new ByteArrayInputStream(data); DataInputStream din = new DataInputStream(bin); TokenIdent ident = createIdentifier(); ident.readFields(din); @@ -488,12 +498,10 @@ private void processTokenAddOrUpdate(ChildData data) throws IOException { if (numRead > -1) { DelegationTokenInformation tokenInfo = new DelegationTokenInformation(renewDate, password); - synchronized (this) { - currentTokens.put(ident, tokenInfo); - // The cancel task might be waiting - notifyAll(); - } + currentTokens.put(ident, tokenInfo); + return ident; } + return null; } private void processTokenRemoved(ChildData data) throws IOException { @@ -501,11 +509,7 @@ private void processTokenRemoved(ChildData data) throws IOException { DataInputStream din = new DataInputStream(bin); TokenIdent ident = createIdentifier(); ident.readFields(din); - synchronized (this) { - currentTokens.remove(ident); - // The cancel task might be waiting - notifyAll(); - } + currentTokens.remove(ident); } @Override @@ -706,7 +710,7 @@ 
protected DelegationTokenInformation getTokenInfo(TokenIdent ident) { * * @param ident Identifier of the token */ - private synchronized void syncLocalCacheWithZk(TokenIdent ident) { + protected void syncLocalCacheWithZk(TokenIdent ident) { try { DelegationTokenInformation tokenInfo = getTokenInfoFromZK(ident); if (tokenInfo != null && !currentTokens.containsKey(ident)) { @@ -720,16 +724,21 @@ private synchronized void syncLocalCacheWithZk(TokenIdent ident) { } } - private DelegationTokenInformation getTokenInfoFromZK(TokenIdent ident) + protected DelegationTokenInformation getTokenInfoFromZK(TokenIdent ident) throws IOException { return getTokenInfoFromZK(ident, false); } - private DelegationTokenInformation getTokenInfoFromZK(TokenIdent ident, + protected DelegationTokenInformation getTokenInfoFromZK(TokenIdent ident, boolean quiet) throws IOException { String nodePath = getNodePath(ZK_DTSM_TOKENS_ROOT, DELEGATION_TOKEN_PREFIX + ident.getSequenceNumber()); + return getTokenInfoFromZK(nodePath, quiet); + } + + protected DelegationTokenInformation getTokenInfoFromZK(String nodePath, + boolean quiet) throws IOException { try { byte[] data = zkClient.getData().forPath(nodePath); if ((data == null) || (data.length == 0)) { @@ -864,15 +873,30 @@ protected void updateToken(TokenIdent ident, @Override protected void removeStoredToken(TokenIdent ident) throws IOException { + removeStoredToken(ident, false); + } + + protected void removeStoredToken(TokenIdent ident, + boolean checkAgainstZkBeforeDeletion) throws IOException { String nodeRemovePath = getNodePath(ZK_DTSM_TOKENS_ROOT, DELEGATION_TOKEN_PREFIX + ident.getSequenceNumber()); - if (LOG.isDebugEnabled()) { - LOG.debug("Removing ZKDTSMDelegationToken_" - + ident.getSequenceNumber()); - } try { - if (zkClient.checkExists().forPath(nodeRemovePath) != null) { + DelegationTokenInformation dtInfo = getTokenInfoFromZK(ident, true); + if (dtInfo != null) { + // For the case there is no sync or watch miss, it is possible that the + // local storage has expired tokens which have been renewed by peer + // so double check again to avoid accidental delete + if (checkAgainstZkBeforeDeletion + && dtInfo.getRenewDate() > now()) { + LOG.info("Node already renewed by peer " + nodeRemovePath + + " so this token should not be deleted"); + return; + } + if (LOG.isDebugEnabled()) { + LOG.debug("Removing ZKDTSMDelegationToken_" + + ident.getSequenceNumber()); + } while(zkClient.checkExists().forPath(nodeRemovePath) != null){ try { zkClient.delete().guaranteed().forPath(nodeRemovePath); @@ -895,7 +919,7 @@ protected void removeStoredToken(TokenIdent ident) } @Override - public synchronized TokenIdent cancelToken(Token token, + public TokenIdent cancelToken(Token token, String canceller) throws IOException { ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier()); DataInputStream in = new DataInputStream(buf); @@ -906,7 +930,7 @@ public synchronized TokenIdent cancelToken(Token token, return super.cancelToken(token, canceller); } - private void addOrUpdateToken(TokenIdent ident, + protected void addOrUpdateToken(TokenIdent ident, DelegationTokenInformation info, boolean isUpdate) throws Exception { String nodeCreatePath = getNodePath(ZK_DTSM_TOKENS_ROOT, DELEGATION_TOKEN_PREFIX @@ -933,6 +957,10 @@ private void addOrUpdateToken(TokenIdent ident, } } + public boolean isTokenWatcherEnabled() { + return isTokenWatcherEnabled; + } + /** * Simple implementation of an {@link ACLProvider} that simply returns an ACL * that gives all permissions only to 
a single principal. diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java index 284044fd938a8..1de534f36ba4f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java @@ -20,9 +20,8 @@ import java.io.IOException; import java.io.Writer; import java.text.MessageFormat; -import java.util.HashMap; +import java.util.Collections; import java.util.HashSet; -import java.util.LinkedHashMap; import java.util.Map; import java.util.Properties; import java.util.Set; @@ -301,8 +300,7 @@ public boolean managementOperation(AuthenticationToken token, dt.decodeFromUrlString(tokenToRenew); long expirationTime = tokenManager.renewToken(dt, requestUgi.getShortUserName()); - map = new HashMap(); - map.put("long", expirationTime); + map = Collections.singletonMap("long", expirationTime); } catch (IOException ex) { throw new AuthenticationException(ex.toString(), ex); } @@ -358,13 +356,11 @@ public boolean managementOperation(AuthenticationToken token, @SuppressWarnings("unchecked") private static Map delegationTokenToJSON(Token token) throws IOException { - Map json = new LinkedHashMap(); - json.put( + Map json = Collections.singletonMap( KerberosDelegationTokenAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON, token.encodeToUrlString()); - Map response = new LinkedHashMap(); - response.put(KerberosDelegationTokenAuthenticator.DELEGATION_TOKEN_JSON, - json); + Map response = Collections.singletonMap( + KerberosDelegationTokenAuthenticator.DELEGATION_TOKEN_JSON, json); return response; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/AbstractService.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/AbstractService.java index c9fec435bfa24..0aa5bafe88423 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/AbstractService.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/AbstractService.java @@ -20,6 +20,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -426,7 +427,7 @@ private void recordLifecycleEvent() { @Override public synchronized List getLifecycleHistory() { - return new ArrayList(lifecycleHistory); + return Collections.unmodifiableList(new ArrayList<>(lifecycleHistory)); } /** @@ -483,8 +484,7 @@ public void removeBlocker(String name) { @Override public Map getBlockers() { synchronized (blockerMap) { - Map map = new HashMap(blockerMap); - return map; + return Collections.unmodifiableMap(new HashMap<>(blockerMap)); } } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/CompositeService.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/CompositeService.java index 4aa2f23fad730..ee66e90f7c4ee 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/CompositeService.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/CompositeService.java @@ 
-19,6 +19,7 @@ package org.apache.hadoop.service; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import org.apache.hadoop.classification.InterfaceAudience.Public; @@ -60,7 +61,7 @@ public CompositeService(String name) { */ public List getServices() { synchronized (serviceList) { - return new ArrayList(serviceList); + return Collections.unmodifiableList(new ArrayList<>(serviceList)); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/ServiceLauncher.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/ServiceLauncher.java index 5e8a1f4eb21fb..6d161bf8b613c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/ServiceLauncher.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/ServiceLauncher.java @@ -23,6 +23,7 @@ import java.net.URL; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.List; import com.google.common.annotations.VisibleForTesting; @@ -894,7 +895,7 @@ public List extractCommandOptions(Configuration conf, List args) { int size = args.size(); if (size <= 1) { - return new ArrayList<>(0); + return Collections.emptyList(); } List coreArgs = args.subList(1, size); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdmin.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdmin.java index 766fb0a6557eb..130414c2895b5 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdmin.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdmin.java @@ -28,7 +28,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation; @@ -179,7 +179,7 @@ public int run(String argv[]) throws Exception { servicePrincipal); } RPC.setProtocolEngine(getConf(), TraceAdminProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); InetSocketAddress address = NetUtils.createSocketAddr(hostPort); UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); Class xface = TraceAdminProtocolPB.class; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HttpExceptionUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HttpExceptionUtils.java index 12d1ef01201a2..3cc7a4bb4ea5b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HttpExceptionUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HttpExceptionUtils.java @@ -28,6 +28,7 @@ import java.io.Writer; import java.lang.reflect.Constructor; import java.net.HttpURLConnection; +import java.util.Collections; import java.util.LinkedHashMap; import java.util.Map; @@ -71,8 +72,8 @@ public static void createServletExceptionResponse( json.put(ERROR_MESSAGE_JSON, getOneLineMessage(ex)); json.put(ERROR_EXCEPTION_JSON, ex.getClass().getSimpleName()); json.put(ERROR_CLASSNAME_JSON, ex.getClass().getName()); - Map jsonResponse = new LinkedHashMap(); - jsonResponse.put(ERROR_JSON, json); + Map jsonResponse = 
+ Collections.singletonMap(ERROR_JSON, json); Writer writer = response.getWriter(); JsonSerialization.writer().writeValue(writer, jsonResponse); writer.flush(); @@ -91,8 +92,7 @@ public static Response createJerseyExceptionResponse(Response.Status status, json.put(ERROR_MESSAGE_JSON, getOneLineMessage(ex)); json.put(ERROR_EXCEPTION_JSON, ex.getClass().getSimpleName()); json.put(ERROR_CLASSNAME_JSON, ex.getClass().getName()); - Map response = new LinkedHashMap(); - response.put(ERROR_JSON, json); + Map response = Collections.singletonMap(ERROR_JSON, json); return Response.status(status).type(MediaType.APPLICATION_JSON). entity(response).build(); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java index e66c81b4b8df6..0dc49739c4b5a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java @@ -28,6 +28,7 @@ import java.util.Collections; import java.util.HashSet; import java.util.Map; +import java.util.Objects; import java.util.Set; import java.util.Timer; import java.util.TimerTask; @@ -871,6 +872,7 @@ protected Shell(long interval, boolean redirectErrorStream) { this.interval = interval; this.lastTime = (interval < 0) ? 0 : -interval; this.redirectErrorStream = redirectErrorStream; + this.environment = Collections.emptyMap(); } /** @@ -878,7 +880,7 @@ protected Shell(long interval, boolean redirectErrorStream) { * @param env Mapping of environment variables */ protected void setEnvironment(Map env) { - this.environment = env; + this.environment = Objects.requireNonNull(env); } /** @@ -915,9 +917,7 @@ private void runCommand() throws IOException { builder.environment().clear(); } - if (environment != null) { - builder.environment().putAll(this.environment); - } + builder.environment().putAll(this.environment); if (dir != null) { builder.directory(this.dir); diff --git a/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine2.proto b/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine2.proto new file mode 100644 index 0000000000000..16ee880e7b720 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine2.proto @@ -0,0 +1,67 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * These .proto interfaces are private and stable. + * Please see http://wiki.apache.org/hadoop/Compatibility + * for what changes are allowed for a *stable* .proto interface. + */ +syntax = "proto2"; +/** + * These are the messages used by Hadoop RPC for the Rpc Engine Protocol Buffer + * to marshal the request and response in the RPC layer. 
+ * The messages are sent in addition to the normal RPC header as + * defined in RpcHeader.proto + */ +option java_package = "org.apache.hadoop.ipc.protobuf"; +option java_outer_classname = "ProtobufRpcEngine2Protos"; +option java_generate_equals_and_hash = true; +package hadoop.common; + +/** + * This message is the header for the Protobuf Rpc Engine + * when sending a RPC request from RPC client to the RPC server. + * The actual request (serialized as protobuf) follows this request. + * + * No special header is needed for the Rpc Response for Protobuf Rpc Engine. + * The normal RPC response header (see RpcHeader.proto) are sufficient. + */ +message RequestHeaderProto { + /** Name of the RPC method */ + required string methodName = 1; + + /** + * RPCs for a particular interface (ie protocol) are done using a + * IPC connection that is setup using rpcProxy. + * The rpcProxy's has a declared protocol name that is + * sent form client to server at connection time. + * + * Each Rpc call also sends a protocol name + * (called declaringClassprotocolName). This name is usually the same + * as the connection protocol name except in some cases. + * For example metaProtocols such ProtocolInfoProto which get metainfo + * about the protocol reuse the connection but need to indicate that + * the actual protocol is different (i.e. the protocol is + * ProtocolInfoProto) since they reuse the connection; in this case + * the declaringClassProtocolName field is set to the ProtocolInfoProto + */ + required string declaringClassProtocolName = 2; + + /** protocol version of class declaring the called method */ + required uint64 clientProtocolVersion = 3; +} diff --git a/hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory b/hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory deleted file mode 100644 index f0054fedb8e1c..0000000000000 --- a/hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory +++ /dev/null @@ -1,16 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -org.apache.hadoop.fs.FileSystemMultipartUploader$Factory diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml index 0d583cca57cd0..cf156af196461 100644 --- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml +++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml @@ -952,6 +952,124 @@ + + fs.viewfs.overload.scheme.target.hdfs.impl + org.apache.hadoop.hdfs.DistributedFileSystem + The DistributedFileSystem for view file system overload scheme + when child file system and ViewFSOverloadScheme's schemes are hdfs. + + + + + fs.viewfs.overload.scheme.target.s3a.impl + org.apache.hadoop.fs.s3a.S3AFileSystem + The S3AFileSystem for view file system overload scheme when + child file system and ViewFSOverloadScheme's schemes are s3a. + + + + fs.viewfs.overload.scheme.target.ofs.impl + org.apache.hadoop.fs.ozone.RootedOzoneFileSystem + The RootedOzoneFileSystem for view file system overload scheme + when child file system and ViewFSOverloadScheme's schemes are ofs. + + + + + fs.viewfs.overload.scheme.target.o3fs.impl + org.apache.hadoop.fs.ozone.OzoneFileSystem + The OzoneFileSystem for view file system overload scheme when + child file system and ViewFSOverloadScheme's schemes are o3fs. + + + + fs.viewfs.overload.scheme.target.ftp.impl + org.apache.hadoop.fs.ftp.FTPFileSystem + The FTPFileSystem for view file system overload scheme when + child file system and ViewFSOverloadScheme's schemes are ftp. + + + + + fs.viewfs.overload.scheme.target.webhdfs.impl + org.apache.hadoop.hdfs.web.WebHdfsFileSystem + The WebHdfsFileSystem for view file system overload scheme when + child file system and ViewFSOverloadScheme's schemes are webhdfs. + + + + + fs.viewfs.overload.scheme.target.swebhdfs.impl + org.apache.hadoop.hdfs.web.SWebHdfsFileSystem + The SWebHdfsFileSystem for view file system overload scheme when + child file system and ViewFSOverloadScheme's schemes are swebhdfs. + + + + + fs.viewfs.overload.scheme.target.file.impl + org.apache.hadoop.fs.LocalFileSystem + The LocalFileSystem for view file system overload scheme when + child file system and ViewFSOverloadScheme's schemes are file. + + + + + fs.viewfs.overload.scheme.target.abfs.impl + org.apache.hadoop.fs.azurebfs.AzureBlobFileSystem + The AzureBlobFileSystem for view file system overload scheme + when child file system and ViewFSOverloadScheme's schemes are abfs. + + + + + fs.viewfs.overload.scheme.target.abfss.impl + org.apache.hadoop.fs.azurebfs.SecureAzureBlobFileSystem + The SecureAzureBlobFileSystem for view file system overload + scheme when child file system and ViewFSOverloadScheme's schemes are abfss. + + + + + fs.viewfs.overload.scheme.target.wasb.impl + org.apache.hadoop.fs.azure.NativeAzureFileSystem + The NativeAzureFileSystem for view file system overload scheme + when child file system and ViewFSOverloadScheme's schemes are wasb. + + + + + fs.viewfs.overload.scheme.target.swift.impl + org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem + The SwiftNativeFileSystem for view file system overload scheme + when child file system and ViewFSOverloadScheme's schemes are swift. + + + + + fs.viewfs.overload.scheme.target.oss.impl + org.apache.hadoop.fs.aliyun.oss.AliyunOSSFileSystem + The AliyunOSSFileSystem for view file system overload scheme + when child file system and ViewFSOverloadScheme's schemes are oss. 
+ + + + + fs.viewfs.overload.scheme.target.http.impl + org.apache.hadoop.fs.http.HttpFileSystem + The HttpFileSystem for view file system overload scheme + when child file system and ViewFSOverloadScheme's schemes are http. + + + + + fs.viewfs.overload.scheme.target.https.impl + org.apache.hadoop.fs.http.HttpsFileSystem + The HttpsFileSystem for view file system overload scheme + when child file system and ViewFSOverloadScheme's schemes are https. + + + fs.AbstractFileSystem.ftp.impl org.apache.hadoop.fs.ftp.FtpFs diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/multipartuploader.md b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/multipartuploader.md index 629c0c418fdf2..906c592eea09d 100644 --- a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/multipartuploader.md +++ b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/multipartuploader.md @@ -14,14 +14,14 @@ - + -# class `org.apache.hadoop.fs.MultipartUploader` +# interface `org.apache.hadoop.fs.MultipartUploader` -The abstract `MultipartUploader` class is the original class to upload a file +The `MultipartUploader` can upload a file using multiple parts to Hadoop-supported filesystems. The benefits of a multipart upload is that the file can be uploaded from multiple clients or processes in parallel and the results will not be visible to other clients until @@ -30,13 +30,12 @@ the `complete` function is called. When implemented by an object store, uploaded data may incur storage charges, even before it is visible in the filesystems. Users of this API must be diligent and always perform best-effort attempts to complete or abort the upload. +The `abortUploadsUnderPath(path)` operation can help here. ## Invariants -All the requirements of a valid MultipartUploader are considered implicit +All the requirements of a valid `MultipartUploader` are considered implicit econditions and postconditions: -all operations on a valid MultipartUploader MUST result in a new -MultipartUploader that is also valid. The operations of a single multipart upload may take place across different instance of a multipart uploader, across different processes and hosts. @@ -45,16 +44,28 @@ It is therefore a requirement that: 1. All state needed to upload a part, complete an upload or abort an upload must be contained within or retrievable from an upload handle. -1. If an upload handle is marshalled to another process, then, if the -receiving process has the correct permissions, it may participate in the -upload, by uploading one or more parts, by completing an upload, and/or by -aborting the upload. +1. That handle MUST be serializable; it MUST be deserializable to different +processes executing the exact same version of Hadoop. + +1. different hosts/processes MAY upload different parts, sequentially or +simultaneously. The order in which they are uploaded to the filesystem +MUST NOT constrain the order in which the data is stored in the final file. + +1. An upload MAY be completed on a different instance than any which uploaded +parts. + +1. The output of an upload MUST NOT be visible at the final destination +until the upload may complete. + +1. It is not an error if a single multipart uploader instance initiates +or completes multiple uploads files to the same destination sequentially, +irrespective of whether or not the store supports concurrent uploads. ## Concurrency Multiple processes may upload parts of a multipart upload simultaneously. 
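A minimal sketch of what this concurrency model permits, using the asynchronous operations specified later in this document. The `TwoPartUpload` class is illustrative, and it assumes a `FileSystem` whose `createMultipartUploader()` builder (referenced elsewhere in this patch) yields a working uploader for the target store:

```java
import java.io.InputStream;
import java.util.Map;
import java.util.TreeMap;
import java.util.concurrent.CompletableFuture;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.MultipartUploader;
import org.apache.hadoop.fs.PartHandle;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathHandle;
import org.apache.hadoop.fs.UploadHandle;

/** Illustrative use only: two parts uploaded in parallel, then one complete(). */
class TwoPartUpload {
  static PathHandle upload(FileSystem fs, Path dest,
      InputStream part1, long len1, InputStream part2, long len2)
      throws Exception {
    try (MultipartUploader mpu = fs.createMultipartUploader(dest).build()) {
      UploadHandle handle = mpu.startUpload(dest).get();
      // Parts may be uploaded from different threads, processes or hosts;
      // the part number, not the submission order, fixes the final layout.
      CompletableFuture<PartHandle> p1 = mpu.putPart(handle, 1, dest, part1, len1);
      CompletableFuture<PartHandle> p2 = mpu.putPart(handle, 2, dest, part2, len2);
      Map<Integer, PartHandle> parts = new TreeMap<>();
      parts.put(1, p1.get());
      parts.put(2, p2.get());
      // Nothing is visible at dest until complete() succeeds.
      return mpu.complete(handle, dest, parts).get();
    }
  }
}
```

In a genuinely multi-process upload, the `UploadHandle` and `PartHandle` bytes would be marshalled between hosts instead of held in local variables, subject to the serialization caveats below.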
-If a call is made to `initialize(path)` to a destination where an active
+If a call is made to `startUpload(path)` to a destination where an active
upload is in progress, implementations MUST perform one of the two operations.

* Reject the call as a duplicate.
* Allow both to proceed, with the final output of the file being that of the
most recently completed upload. Which upload succeeds is undefined. Users must
not expect consistent behavior across filesystems,

@@ -70,9 +81,17 @@
the in-progress upload, if it has not completed, must not be included in the
final file, in whole or in part. Implementations SHOULD raise an error in the
`putPart()` operation.

+# Serialization Compatibility
+
+Users MUST NOT expect that serialized PathHandle versions are compatible across
+* different multipart uploader implementations.
+* different versions of the same implementation.
+
+That is: all clients MUST use the exact same version of Hadoop.
+
 ## Model

-A File System which supports Multipart Uploads extends the existing model
+A FileSystem/FileContext which supports Multipart Uploads extends the existing model
`(Directories, Files, Symlinks)` to one of
`(Directories, Files, Symlinks, Uploads)`
`Uploads` of type `Map[UploadHandle -> Map[PartHandle -> UploadPart]`.

@@ -112,11 +131,40 @@
However, if Part Handles are rapidly recycled, there is a risk that the nominally
idempotent operation `abort(FS, uploadHandle)` could unintentionally cancel a
successor operation which used the same Upload Handle.

+## Asynchronous API
+
+All operations return `CompletableFuture<>` types which must be
+subsequently evaluated to get their return values.
+
+1. The execution of the operation MAY be a blocking operation on the call thread.
+1. If not, it SHALL be executed in a separate thread and MUST complete by the time the
+future evaluation returns.
+1. Some/all preconditions MAY be evaluated at the time of initial invocation.
+1. All those which are not evaluated at that time MUST be evaluated during the execution
+of the future.
+
+What this means is that when an implementation interacts with a fast file system/store, all preconditions,
+including the existence of files, MAY be evaluated early, whereas an implementation interacting with a
+remote object store whose probes are slow MAY verify preconditions in the asynchronous phase, especially
+those which interact with the remote store.
+
+Java CompletableFutures do not work well with checked exceptions. The Hadoop codebase is still evolving the
+details of the exception handling here, as more use is made of the asynchronous APIs. Assume that any
+precondition failure which declares that an `IOException` MUST be raised may have that operation wrapped in a
+`RuntimeException` of some form if evaluated in the future; this also holds for any other `IOException`
+raised during the operations.
+
+### `close()`
+
+Applications MUST call `close()` after using an uploader; this is so it may release other
+objects, update statistics, etc.
+
 ## State Changing Operations

-### `UploadHandle initialize(Path path)`
+### `CompletableFuture<UploadHandle> startUpload(Path)`

-Initialized a Multipart Upload, returning an upload handle for use in
+Starts a Multipart Upload, ultimately returning an `UploadHandle` for use in
subsequent operations.
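Given the `RuntimeException` wrapping described under the asynchronous API notes above, callers evaluating these futures may want a small helper that restores the checked `IOException`. A minimal sketch follows before the preconditions; the `await` helper is illustrative and not part of the API:

```java
import java.io.IOException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;

/** Illustrative helper: re-raise the IOException hidden inside a future. */
final class Futures {
  static <T> T await(CompletableFuture<T> future) throws IOException {
    try {
      return future.join();
    } catch (CompletionException e) {
      Throwable cause = e.getCause();
      if (cause instanceof IOException) {
        throw (IOException) cause;      // restore the checked exception
      }
      if (cause instanceof RuntimeException) {
        throw (RuntimeException) cause; // e.g. a wrapped precondition failure
      }
      throw e;
    }
  }
}
```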
#### Preconditions

@@ -128,17 +176,15 @@
if exists(FS, path) and not isFile(FS, path) raise PathIsDirectoryException, IOException
```

If a filesystem does not support concurrent uploads to a destination,
-then the following precondition is added
+then the following precondition is added:

```python
if path in values(FS.Uploads) raise PathExistsException, IOException
-
```
-

#### Postconditions

-The outcome of this operation is that the filesystem state is updated with a new
+Once the initialization operation completes, the filesystem state is updated with a new
active upload, with a new handle, this handle being returned to the caller.

```python
FS' = FS where FS'.Uploads(handle') == {}
result = handle'
```

-### `PartHandle putPart(Path path, InputStream inputStream, int partNumber, UploadHandle uploadHandle, long lengthInBytes)`
+### `CompletableFuture<PartHandle> putPart(UploadHandle uploadHandle, int partNumber, Path filePath, InputStream inputStream, long lengthInBytes)`

-Upload a part for the multipart upload.
+Upload a part for the specific multipart upload, eventually being returned an opaque part handle
+representing this part of the specified upload.

#### Preconditions

@@ -170,10 +217,12 @@
FS' = FS where FS'.uploads(uploadHandle).parts(partHandle') == data'
result = partHandle'
```

-The data is stored in the filesystem, pending completion.
+The data is stored in the filesystem, pending completion. It MUST NOT be visible at the destination path.
+It MAY be visible in a temporary path somewhere in the file system;
+this is implementation-specific and MUST NOT be relied upon.

-### `PathHandle complete(Path path, Map parts, UploadHandle multipartUploadId)`
+### `CompletableFuture<PathHandle> complete(UploadHandle uploadId, Path filePath, Map<Integer, PartHandle> handles)`

Complete the multipart upload.

#### Preconditions

@@ -188,11 +237,23 @@
uploadHandle in keys(FS.Uploads) else raise FileNotFoundException
FS.Uploads(uploadHandle).path == path
if exists(FS, path) and not isFile(FS, path) raise PathIsDirectoryException, IOException
parts.size() > 0
+forall k in keys(parts): k > 0
+forall k in keys(parts):
+ not exists(k2 in keys(parts)) where (parts[k] == parts[k2])
```

-If there are handles in the MPU which aren't included in the map, then the omitted
-parts will not be a part of the resulting file. It is up to the implementation
-of the MultipartUploader to make sure the leftover parts are cleaned up.
+All keys MUST be greater than zero, and there MUST NOT be any duplicate
+references to the same part handle.
+These validations MAY be performed at any point during the operation.
+After a failure, there is no guarantee that a `complete()` call for this
+upload with a valid map of paths will complete.
+Callers SHOULD invoke `abort()` after any such failure to ensure cleanup.
+
+If `putPart()` operations for this `uploadHandle` were performed but their
+`PathHandle` handles were not included in this request, the omitted
+parts SHALL NOT be a part of the resulting file.
+
+The MultipartUploader MUST clean up any such outstanding entries.
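The key and duplicate-handle checks above map directly onto code. A minimal sketch of the validation an implementation might run; the method name is illustrative, and it assumes `PartHandle` implementations define value-based `equals()`:

```java
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

import org.apache.hadoop.fs.PartHandle;

/** Illustrative validation of the complete() preconditions on the part map. */
static void validatePartHandles(Map<Integer, PartHandle> handles) {
  if (handles.isEmpty()) {
    throw new IllegalArgumentException("Empty upload");
  }
  Set<PartHandle> seen = new HashSet<>();
  for (Map.Entry<Integer, PartHandle> entry : handles.entrySet()) {
    if (entry.getKey() <= 0) {
      throw new IllegalArgumentException("Invalid part number: " + entry.getKey());
    }
    // add() returns false when the same handle was already referenced.
    if (!seen.add(entry.getValue())) {
      throw new IllegalArgumentException("Duplicate part handle at: " + entry.getKey());
    }
  }
}
```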
In the case of backing stores that support directories (local filesystem, HDFS,
 etc), if, at the point of completion, there is now a directory at the
@@ -206,14 +267,14 @@ exists(FS', path') and result = PathHandle(path')
 FS' = FS where FS.Files(path) == UploadData' and not uploadHandle in keys(FS'.uploads)
 ```

-The PathHandle is returned by the complete operation so subsequent operations
+The `PathHandle` is returned by the complete operation so subsequent operations
 will be able to identify that the data has not changed in the meantime.

 The order of parts in the uploaded file is that of the natural order of
-parts: part 1 is ahead of part 2, etc.
+parts in the map: part 1 is ahead of part 2, etc.

-### `void abort(Path path, UploadHandle multipartUploadId)`
+### `CompletableFuture<Void> abort(UploadHandle uploadId, Path filePath)`

 Abort a multipart upload. The handle becomes invalid and not subject to
 reuse.
@@ -233,3 +294,23 @@ FS' = FS where not uploadHandle in keys(FS'.uploads)
 ```

 A subsequent call to `abort()` with the same handle will fail, unless
 the handle has been recycled.
+
+### `CompletableFuture<Integer> abortUploadsUnderPath(Path path)`
+
+Perform a best-effort cleanup of all uploads under a path.
+
+Returns a future which resolves to:
+
+* -1 if unsupported
+* >= 0 if supported
+
+Because it is best-effort, a strict postcondition isn't possible.
+The ideal postcondition is that all uploads under the path are aborted,
+and the count is the number of uploads aborted:
+
+```python
+FS'.uploads = {upload for upload in FS.uploads
+               if not isDescendant(FS, path, upload.path)}
+return len([upload for upload in FS.uploads
+            if isDescendant(FS, path, upload.path)])
+```
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
index 1ce23a0eb81f2..dd9f41a7a3527 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
@@ -124,6 +124,25 @@ public void initializeMemberVariables() {
     xmlPrefixToSkipCompare.add("fs.adl.");
     xmlPropsToSkipCompare.add("fs.AbstractFileSystem.adl.impl");

+    // ViewfsOverloadScheme target fs impl property keys are dynamically
+    // constructed and they are advanced props.
+    xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.abfs.impl");
+    xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.abfss.impl");
+    xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.file.impl");
+    xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.ftp.impl");
+    xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.hdfs.impl");
+    xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.http.impl");
+    xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.https.impl");
+    xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.ofs.impl");
+    xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.o3fs.impl");
+    xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.oss.impl");
+    xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.s3a.impl");
+    xmlPropsToSkipCompare.
+ add("fs.viewfs.overload.scheme.target.swebhdfs.impl"); + xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.webhdfs.impl"); + xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.wasb.impl"); + xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.swift.impl"); + // Azure properties are in a different class // - org.apache.hadoop.fs.azure.AzureNativeFileSystemStore // - org.apache.hadoop.fs.azure.SASKeyGeneratorImpl diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestReconfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestReconfiguration.java index 39d3bae655d85..dd801a53a3dae 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestReconfiguration.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestReconfiguration.java @@ -18,7 +18,7 @@ package org.apache.hadoop.conf; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import com.google.common.collect.Lists; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FCStatisticsBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FCStatisticsBaseTest.java index 73fd2802ab1d1..de278dd37ef46 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FCStatisticsBaseTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FCStatisticsBaseTest.java @@ -37,7 +37,7 @@ import org.junit.Assert; import org.junit.Test; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import com.google.common.util.concurrent.Uninterruptibles; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java index f0057a6c6d902..6cd450610b390 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java @@ -137,6 +137,12 @@ public Token[] addDelegationTokens(String renewer, Credentials creds) void setQuota(Path f, long namespaceQuota, long storagespaceQuota); void setQuotaByStorageType(Path f, StorageType type, long quota); StorageStatistics getStorageStatistics(); + + /* + Not passed through as the inner implementation will miss features + of the filter such as checksums. + */ + MultipartUploaderBuilder createMultipartUploader(Path basePath); } @Test @@ -278,6 +284,23 @@ public void testRenameOptions() throws Exception { verify(mockFs).rename(eq(src), eq(dst), eq(opt)); } + /** + * Verify that filterFS always returns false, even if local/rawlocal + * ever implement multipart uploads. 
+   */
+  @Test
+  public void testFilterPathCapabilities() throws Exception {
+    try (FilterFileSystem flfs = new FilterLocalFileSystem()) {
+      flfs.initialize(URI.create("filter:/"), conf);
+      Path src = new Path("/src");
+      assertFalse(
+          "hasPathCapability(FS_MULTIPART_UPLOADER) should be false for "
+              + flfs,
+          flfs.hasPathCapability(src,
+              CommonPathCapabilities.FS_MULTIPART_UPLOADER));
+    }
+  }
+
   private void checkInit(FilterFileSystem fs, boolean expectInit)
       throws Exception {
     URI uri = URI.create("filter:/");
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java
index 2097633839112..8050ce6b4427d 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java
@@ -248,6 +248,9 @@ CompletableFuture<FSDataInputStream> openFileWithOptions(
     CompletableFuture<FSDataInputStream> openFileWithOptions(
         Path path,
         OpenFileParameters parameters) throws IOException;
+
+    MultipartUploaderBuilder createMultipartUploader(Path basePath)
+        throws IOException;
   }

   @Test
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java
index 7a8f0830eda37..31926964c897c 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java
@@ -26,8 +26,10 @@
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Random;
+import java.util.concurrent.CompletableFuture;

 import com.google.common.base.Charsets;
+import org.assertj.core.api.Assertions;
 import org.junit.Assume;
 import org.junit.Test;
 import org.slf4j.Logger;
@@ -35,22 +37,31 @@
 import org.apache.commons.codec.digest.DigestUtils;
 import org.apache.commons.io.IOUtils;

-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BBUploadHandle;
+import org.apache.hadoop.fs.CommonPathCapabilities;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.MultipartUploader;
-import org.apache.hadoop.fs.MultipartUploaderFactory;
 import org.apache.hadoop.fs.PartHandle;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathHandle;
 import org.apache.hadoop.fs.UploadHandle;
+import org.apache.hadoop.test.LambdaTestUtils;
+import org.apache.hadoop.util.DurationInfo;

 import static org.apache.hadoop.fs.contract.ContractTestUtils.verifyPathExists;
+import static org.apache.hadoop.fs.impl.FutureIOSupport.awaitFuture;
 import static org.apache.hadoop.io.IOUtils.cleanupWithLogger;
 import static org.apache.hadoop.test.LambdaTestUtils.eventually;
 import static org.apache.hadoop.test.LambdaTestUtils.intercept;

+/**
+ * Tests of multipart uploads.
+ *

+ * Note: some of the tests get a random uploader between
+ * the two which are available. If tests fail intermittently,
+ * it may be because different uploaders are being selected.
+ */
 public abstract class AbstractContractMultipartUploaderTest extends
     AbstractFSContractTestBase {

@@ -63,36 +74,44 @@ public abstract class AbstractContractMultipartUploaderTest extends
    */
   protected static final int SMALL_FILE = 100;

-  private MultipartUploader mpu;
-  private MultipartUploader mpu2;
+  protected static final int CONSISTENCY_INTERVAL = 1000;
+
+  private MultipartUploader uploader0;
+  private MultipartUploader uploader1;
   private final Random random = new Random();
   private UploadHandle activeUpload;
   private Path activeUploadPath;

-  protected String getMethodName() {
-    return methodName.getMethodName();
-  }
-
   @Override
   public void setup() throws Exception {
     super.setup();
-    Configuration conf = getContract().getConf();
-    mpu = MultipartUploaderFactory.get(getFileSystem(), conf);
-    mpu2 = MultipartUploaderFactory.get(getFileSystem(), conf);
+
+    final FileSystem fs = getFileSystem();
+    Path testPath = getContract().getTestPath();
+    uploader0 = fs.createMultipartUploader(testPath).build();
+    uploader1 = fs.createMultipartUploader(testPath).build();
   }

   @Override
   public void teardown() throws Exception {
-    if (mpu!= null && activeUpload != null) {
+    MultipartUploader uploader = getUploader(1);
+    if (uploader != null) {
+      if (activeUpload != null) {
+        abortUploadQuietly(activeUpload, activeUploadPath);
+      }
       try {
-        mpu.abort(activeUploadPath, activeUpload);
-      } catch (FileNotFoundException ignored) {
-        /* this is fine */
+        // round off with an abort of all uploads
+        Path teardown = getContract().getTestPath();
+        LOG.info("Teardown: aborting outstanding uploads under {}", teardown);
+        CompletableFuture<Integer> f
+            = uploader.abortUploadsUnderPath(teardown);
+        f.get();
       } catch (Exception e) {
-        LOG.info("in teardown", e);
+        LOG.warn("Exception in teardown", e);
       }
     }
-    cleanupWithLogger(LOG, mpu, mpu2);
+
+    cleanupWithLogger(LOG, uploader0, uploader1);
     super.teardown();
   }

@@ -192,16 +211,16 @@ protected int timeToBecomeConsistentMillis() {
    * @param index index of upload
    * @return an uploader
    */
-  protected MultipartUploader mpu(int index) {
-    return (index % 2 == 0) ? mpu : mpu2;
+  protected MultipartUploader getUploader(int index) {
+    return (index % 2 == 0) ? uploader0 : uploader1;
   }

   /**
    * Pick a multipart uploader at random.
    * @return an uploader
    */
-  protected MultipartUploader randomMpu() {
-    return mpu(random.nextInt(10));
+  protected MultipartUploader getRandomUploader() {
+    return getUploader(random.nextInt(10));
   }

   /**
@@ -211,39 +230,71 @@ protected MultipartUploader randomMpu() {
   @Test
   public void testSingleUpload() throws Exception {
     Path file = methodPath();
-    UploadHandle uploadHandle = initializeUpload(file);
+    UploadHandle uploadHandle = startUpload(file);
     Map<Integer, PartHandle> partHandles = new HashMap<>();
     MessageDigest origDigest = DigestUtils.getMd5Digest();
     int size = SMALL_FILE;
     byte[] payload = generatePayload(1, size);
     origDigest.update(payload);
+    // use a single uploader
+    // note: the same is used here as it found a bug in the S3Guard
+    // DDB bulk operation state upload - the previous operation had
+    // added an entry to the ongoing state; this second call
+    // was interpreted as an inconsistent write.
+    MultipartUploader completer = uploader0;
+    // and upload with uploader 1 to validate cross-uploader uploads
     PartHandle partHandle = putPart(file, uploadHandle, 1, payload);
     partHandles.put(1, partHandle);
-    PathHandle fd = completeUpload(file, uploadHandle, partHandles,
-        origDigest,
-        size);
+    PathHandle fd = complete(completer, uploadHandle, file,
+        partHandles);
+
+    validateUpload(file, origDigest, size);

+    // verify that if the implementation processes data immediately
+    // then a second attempt at the upload will fail.
     if (finalizeConsumesUploadIdImmediately()) {
       intercept(FileNotFoundException.class,
-          () -> mpu.complete(file, partHandles, uploadHandle));
+          () -> complete(completer, uploadHandle, file, partHandles));
     } else {
-      PathHandle fd2 = mpu.complete(file, partHandles, uploadHandle);
+      // otherwise, the same or other uploader can try again.
+      PathHandle fd2 = complete(completer, uploadHandle, file, partHandles);
       assertArrayEquals("Path handles differ", fd.toByteArray(),
           fd2.toByteArray());
     }
   }

   /**
-   * Initialize an upload.
+   * Complete IO for a specific uploader; await the response.
+   * @param uploader uploader
+   * @param uploadHandle Identifier
+   * @param file Target path for upload
+   * @param partHandles handles map of part number to part handle
+   * @return unique PathHandle identifier for the uploaded file.
+   */
+  protected PathHandle complete(
+      final MultipartUploader uploader,
+      final UploadHandle uploadHandle,
+      final Path file,
+      final Map<Integer, PartHandle> partHandles)
+      throws IOException {
+    try (DurationInfo d =
+        new DurationInfo(LOG, "Complete upload to %s", file)) {
+      return awaitFuture(
+          uploader.complete(uploadHandle, file, partHandles));
+    }
+  }
+
+  /**
+   * Start an upload.
    * This saves the path and upload handle as the active
    * upload, for aborting in teardown.
    * @param dest destination
    * @return the handle
    * @throws IOException failure to initialize
    */
-  protected UploadHandle initializeUpload(final Path dest) throws IOException {
+  protected UploadHandle startUpload(final Path dest) throws IOException {
     activeUploadPath = dest;
-    activeUpload = randomMpu().initialize(dest);
+    activeUpload = awaitFuture(getRandomUploader().startUpload(dest));
     return activeUpload;
   }

@@ -283,12 +334,17 @@ protected PartHandle putPart(final Path file,
       final int index,
       final byte[] payload) throws IOException {
     ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer();
-    PartHandle partHandle = mpu(index)
-        .putPart(file,
-            new ByteArrayInputStream(payload),
-            index,
-            uploadHandle,
-            payload.length);
+    PartHandle partHandle;
+    try (DurationInfo d =
+        new DurationInfo(LOG, "Put part %d (size %s) %s",
+            index,
+            payload.length,
+            file)) {
+      partHandle = awaitFuture(getUploader(index)
+          .putPart(uploadHandle, index, file,
+              new ByteArrayInputStream(payload),
+              payload.length));
+    }
     timer.end("Uploaded part %s", index);
     LOG.info("Upload bandwidth {} MB/s",
         timer.bandwidthDescription(payload.length));
@@ -296,7 +352,7 @@ protected PartHandle putPart(final Path file,
   }

   /**
-   * Complete an upload with the active MPU instance.
+   * Complete an upload with a random uploader.
   * @param file destination
   * @param uploadHandle handle
   * @param partHandles map of handles
@@ -312,36 +368,64 @@ private PathHandle completeUpload(final Path file,
       final int expectedLength) throws IOException {
     PathHandle fd = complete(file, uploadHandle, partHandles);

-    FileStatus status = verifyPathExists(getFileSystem(),
-        "Completed file", file);
-    assertEquals("length of " + status,
-        expectedLength, status.getLen());
+    validateUpload(file, origDigest, expectedLength);
+    return fd;
+  }
+
+  /**
+   * Validate a completed upload: verify existence, length and,
+   * if a digest was supplied, the contents.
+   * @param file destination
+   * @param origDigest digest of source data (may be null)
+   * @param expectedLength expected length of result.
+   * @throws IOException IO failure
+   */
+  private void validateUpload(final Path file,
+      final MessageDigest origDigest,
+      final int expectedLength) throws IOException {
+    verifyPathExists(getFileSystem(),
+        "Completed file", file);
+    verifyFileLength(file, expectedLength);
     if (origDigest != null) {
       verifyContents(file, origDigest, expectedLength);
     }
-    return fd;
   }

   /**
    * Verify the contents of a file.
    * @param file path
    * @param origDigest digest
-   * @param expectedLength expected length (for logging B/W)
+   * @param expectedLength expected length (for logging download bandwidth)
    * @throws IOException IO failure
    */
   protected void verifyContents(final Path file,
      final MessageDigest origDigest,
      final int expectedLength) throws IOException {
     ContractTestUtils.NanoTimer timer2 = new ContractTestUtils.NanoTimer();
-    assertArrayEquals("digest of source and " + file
-            + " differ",
-        origDigest.digest(), digest(file));
+    Assertions.assertThat(digest(file))
+        .describedAs("digest of uploaded file %s", file)
+        .isEqualTo(origDigest.digest());
     timer2.end("Completed digest", file);
     LOG.info("Download bandwidth {} MB/s",
         timer2.bandwidthDescription(expectedLength));
   }

+  /**
+   * Verify the length of a file.
+   * @param file path
+   * @param expectedLength expected length
+   * @throws IOException IO failure
+   */
+  private void verifyFileLength(final Path file, final long expectedLength)
+      throws IOException {
+    FileStatus st = getFileSystem().getFileStatus(file);
+    Assertions.assertThat(st)
+        .describedAs("Uploaded file %s", st)
+        .matches(FileStatus::isFile)
+        .extracting(FileStatus::getLen)
+        .isEqualTo(expectedLength);
+  }
+
   /**
    * Perform the inner complete without verification.
    * @param file destination path
@@ -353,21 +437,37 @@ protected void verifyContents(final Path file,
   private PathHandle complete(final Path file,
       final UploadHandle uploadHandle,
       final Map<Integer, PartHandle> partHandles) throws IOException {
-    ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer();
-    PathHandle fd = randomMpu().complete(file, partHandles, uploadHandle);
-    timer.end("Completed upload to %s", file);
-    return fd;
+    return complete(getRandomUploader(), uploadHandle, file,
+        partHandles);
   }

   /**
    * Abort an upload.
-   * @param file path
    * @param uploadHandle handle
+   * @param file path
    * @throws IOException failure
    */
-  private void abortUpload(final Path file, UploadHandle uploadHandle)
+  private void abortUpload(UploadHandle uploadHandle,
+      final Path file)
       throws IOException {
-    randomMpu().abort(file, uploadHandle);
+    try (DurationInfo d =
+        new DurationInfo(LOG, "Abort upload to %s", file)) {
+      awaitFuture(getRandomUploader().abort(uploadHandle, file));
+    }
+  }
+
+  /**
+   * Abort an upload; swallows exceptions.
+ * @param uploadHandle handle + * @param file path + */ + private void abortUploadQuietly(UploadHandle uploadHandle, Path file) { + try { + abortUpload(uploadHandle, file); + } catch (FileNotFoundException ignored) { + } catch (Exception e) { + LOG.info("aborting {}: {}", file, e.toString()); + } } /** @@ -377,10 +477,10 @@ private void abortUpload(final Path file, UploadHandle uploadHandle) @Test public void testMultipartUpload() throws Exception { Path file = methodPath(); - UploadHandle uploadHandle = initializeUpload(file); + UploadHandle uploadHandle = startUpload(file); Map partHandles = new HashMap<>(); MessageDigest origDigest = DigestUtils.getMd5Digest(); - final int payloadCount = getTestPayloadCount(); + int payloadCount = getTestPayloadCount(); for (int i = 1; i <= payloadCount; ++i) { PartHandle partHandle = buildAndPutPart(file, uploadHandle, i, origDigest); @@ -400,16 +500,16 @@ public void testMultipartUploadEmptyPart() throws Exception { FileSystem fs = getFileSystem(); Path file = path("testMultipartUpload"); try (MultipartUploader uploader = - MultipartUploaderFactory.get(fs, null)) { - UploadHandle uploadHandle = uploader.initialize(file); + fs.createMultipartUploader(file).build()) { + UploadHandle uploadHandle = uploader.startUpload(file).get(); Map partHandles = new HashMap<>(); MessageDigest origDigest = DigestUtils.getMd5Digest(); byte[] payload = new byte[0]; origDigest.update(payload); InputStream is = new ByteArrayInputStream(payload); - PartHandle partHandle = uploader.putPart(file, is, 1, uploadHandle, - payload.length); + PartHandle partHandle = awaitFuture( + uploader.putPart(uploadHandle, 1, file, is, payload.length)); partHandles.put(1, partHandle); completeUpload(file, uploadHandle, partHandles, origDigest, 0); } @@ -422,7 +522,7 @@ public void testMultipartUploadEmptyPart() throws Exception { @Test public void testUploadEmptyBlock() throws Exception { Path file = methodPath(); - UploadHandle uploadHandle = initializeUpload(file); + UploadHandle uploadHandle = startUpload(file); Map partHandles = new HashMap<>(); partHandles.put(1, putPart(file, uploadHandle, 1, new byte[0])); completeUpload(file, uploadHandle, partHandles, null, 0); @@ -435,10 +535,10 @@ public void testUploadEmptyBlock() throws Exception { @Test public void testMultipartUploadReverseOrder() throws Exception { Path file = methodPath(); - UploadHandle uploadHandle = initializeUpload(file); + UploadHandle uploadHandle = startUpload(file); Map partHandles = new HashMap<>(); MessageDigest origDigest = DigestUtils.getMd5Digest(); - final int payloadCount = getTestPayloadCount(); + int payloadCount = getTestPayloadCount(); for (int i = 1; i <= payloadCount; ++i) { byte[] payload = generatePayload(i); origDigest.update(payload); @@ -459,7 +559,7 @@ public void testMultipartUploadReverseOrderNonContiguousPartNumbers() throws Exception { describe("Upload in reverse order and the part numbers are not contiguous"); Path file = methodPath(); - UploadHandle uploadHandle = initializeUpload(file); + UploadHandle uploadHandle = startUpload(file); MessageDigest origDigest = DigestUtils.getMd5Digest(); int payloadCount = 2 * getTestPayloadCount(); for (int i = 2; i <= payloadCount; i += 2) { @@ -482,22 +582,22 @@ public void testMultipartUploadReverseOrderNonContiguousPartNumbers() public void testMultipartUploadAbort() throws Exception { describe("Upload and then abort it before completing"); Path file = methodPath(); - UploadHandle uploadHandle = initializeUpload(file); - int end = 10; + UploadHandle 
uploadHandle = startUpload(file);
     Map<Integer, PartHandle> partHandles = new HashMap<>();
     for (int i = 12; i > 10; i--) {
       partHandles.put(i, buildAndPutPart(file, uploadHandle, i, null));
     }
-    abortUpload(file, uploadHandle);
+    abortUpload(uploadHandle, file);

     String contents = "ThisIsPart49\n";
     int len = contents.getBytes(Charsets.UTF_8).length;
     InputStream is = IOUtils.toInputStream(contents, "UTF-8");

     intercept(IOException.class,
-        () -> mpu.putPart(file, is, 49, uploadHandle, len));
+        () -> awaitFuture(
+            uploader0.putPart(uploadHandle, 49, file, is, len)));
     intercept(IOException.class,
-        () -> mpu.complete(file, partHandles, uploadHandle));
+        () -> complete(uploader0, uploadHandle, file, partHandles));

     assertPathDoesNotExist("Uploaded file should not exist", file);

@@ -505,9 +605,9 @@ public void testMultipartUploadAbort() throws Exception {
     // consumed by finalization operations (complete, abort).
     if (finalizeConsumesUploadIdImmediately()) {
       intercept(FileNotFoundException.class,
-          () -> abortUpload(file, uploadHandle));
+          () -> abortUpload(uploadHandle, file));
     } else {
-      abortUpload(file, uploadHandle);
+      abortUpload(uploadHandle, file);
     }
   }

@@ -519,31 +619,55 @@ public void testAbortUnknownUpload() throws Exception {
     Path file = methodPath();
     ByteBuffer byteBuffer = ByteBuffer.wrap(
         "invalid-handle".getBytes(Charsets.UTF_8));
-    UploadHandle uploadHandle = BBUploadHandle.from(byteBuffer);
     intercept(FileNotFoundException.class,
-        () -> abortUpload(file, uploadHandle));
+        () -> abortUpload(BBUploadHandle.from(byteBuffer), file));
   }

   /**
-   * Trying to abort with a handle of size 0 must fail.
+   * Trying to abort an upload with no data does not create a file.
    */
   @Test
   public void testAbortEmptyUpload() throws Exception {
     describe("initialize upload and abort before uploading data");
     Path file = methodPath();
-    abortUpload(file, initializeUpload(file));
+    abortUpload(startUpload(file), file);
     assertPathDoesNotExist("Uploaded file should not exist", file);
   }

+
+  /**
+   * Aborting all pending uploads under a path cleans up the pending
+   * upload and does not create a file at the destination.
+   */
+  @Test
+  public void testAbortAllPendingUploads() throws Exception {
+    describe("initialize upload and abort the pending upload");
+    Path path = methodPath();
+    Path file = new Path(path, "child");
+    UploadHandle upload = startUpload(file);
+    try {
+      CompletableFuture<Integer> oF
+          = getRandomUploader().abortUploadsUnderPath(path.getParent());
+      int abortedUploads = awaitFuture(oF);
+      if (abortedUploads >= 0) {
+        // uploads can be aborted
+        Assertions.assertThat(abortedUploads)
+            .describedAs("Number of uploads aborted")
+            .isGreaterThanOrEqualTo(1);
+        assertPathDoesNotExist("Uploaded file should not exist", file);
+      }
+    } finally {
+      abortUploadQuietly(upload, file);
+    }
+  }
+
   /**
    * Trying to abort with a handle of size 0 must fail.
*/ @Test public void testAbortEmptyUploadHandle() throws Exception { ByteBuffer byteBuffer = ByteBuffer.wrap(new byte[0]); - UploadHandle uploadHandle = BBUploadHandle.from(byteBuffer); intercept(IllegalArgumentException.class, - () -> abortUpload(methodPath(), uploadHandle)); + () -> abortUpload(BBUploadHandle.from(byteBuffer), methodPath())); } /** @@ -553,10 +677,10 @@ public void testAbortEmptyUploadHandle() throws Exception { public void testCompleteEmptyUpload() throws Exception { describe("Expect an empty MPU to fail, but still be abortable"); Path dest = methodPath(); - UploadHandle handle = initializeUpload(dest); + UploadHandle handle = startUpload(dest); intercept(IllegalArgumentException.class, - () -> mpu.complete(dest, new HashMap<>(), handle)); - abortUpload(dest, handle); + () -> complete(uploader0, handle, dest, new HashMap<>())); + abortUpload(handle, dest); } /** @@ -571,7 +695,7 @@ public void testPutPartEmptyUploadID() throws Exception { byte[] payload = generatePayload(1); InputStream is = new ByteArrayInputStream(payload); intercept(IllegalArgumentException.class, - () -> mpu.putPart(dest, is, 1, emptyHandle, payload.length)); + () -> uploader0.putPart(emptyHandle, 1, dest, is, payload.length)); } /** @@ -581,7 +705,7 @@ public void testPutPartEmptyUploadID() throws Exception { public void testCompleteEmptyUploadID() throws Exception { describe("Expect IllegalArgumentException when complete uploadID is empty"); Path dest = methodPath(); - UploadHandle realHandle = initializeUpload(dest); + UploadHandle realHandle = startUpload(dest); UploadHandle emptyHandle = BBUploadHandle.from(ByteBuffer.wrap(new byte[0])); Map partHandles = new HashMap<>(); @@ -590,14 +714,14 @@ public void testCompleteEmptyUploadID() throws Exception { partHandles.put(1, partHandle); intercept(IllegalArgumentException.class, - () -> mpu.complete(dest, partHandles, emptyHandle)); + () -> complete(uploader0, emptyHandle, dest, partHandles)); // and, while things are setup, attempt to complete with // a part index of 0 partHandles.clear(); partHandles.put(0, partHandle); intercept(IllegalArgumentException.class, - () -> mpu.complete(dest, partHandles, realHandle)); + () -> complete(uploader0, realHandle, dest, partHandles)); } /** @@ -610,7 +734,7 @@ public void testCompleteEmptyUploadID() throws Exception { public void testDirectoryInTheWay() throws Exception { FileSystem fs = getFileSystem(); Path file = methodPath(); - UploadHandle uploadHandle = initializeUpload(file); + UploadHandle uploadHandle = startUpload(file); Map partHandles = new HashMap<>(); int size = SMALL_FILE; PartHandle partHandle = putPart(file, uploadHandle, 1, @@ -622,7 +746,7 @@ public void testDirectoryInTheWay() throws Exception { () -> completeUpload(file, uploadHandle, partHandles, null, size)); // abort should still work - abortUpload(file, uploadHandle); + abortUpload(uploadHandle, file); } @Test @@ -630,46 +754,44 @@ public void testConcurrentUploads() throws Throwable { // if the FS doesn't support concurrent uploads, this test is // required to fail during the second initialization. 
- final boolean concurrent = supportsConcurrentUploadsToSamePath(); + boolean concurrent = supportsConcurrentUploadsToSamePath(); describe("testing concurrent uploads, MPU support for this is " + concurrent); - final FileSystem fs = getFileSystem(); - final Path file = methodPath(); - final int size1 = SMALL_FILE; - final int partId1 = 1; - final byte[] payload1 = generatePayload(partId1, size1); - final MessageDigest digest1 = DigestUtils.getMd5Digest(); + Path file = methodPath(); + int size1 = SMALL_FILE; + int partId1 = 1; + byte[] payload1 = generatePayload(partId1, size1); + MessageDigest digest1 = DigestUtils.getMd5Digest(); digest1.update(payload1); - final UploadHandle upload1 = initializeUpload(file); - final Map partHandles1 = new HashMap<>(); + UploadHandle upload1 = startUpload(file); + Map partHandles1 = new HashMap<>(); // initiate part 2 // by using a different size, it's straightforward to see which // version is visible, before reading/digesting the contents - final int size2 = size1 * 2; - final int partId2 = 2; - final byte[] payload2 = generatePayload(partId1, size2); - final MessageDigest digest2 = DigestUtils.getMd5Digest(); + int size2 = size1 * 2; + int partId2 = 2; + byte[] payload2 = generatePayload(partId1, size2); + MessageDigest digest2 = DigestUtils.getMd5Digest(); digest2.update(payload2); - final UploadHandle upload2; + UploadHandle upload2; try { - upload2 = initializeUpload(file); + upload2 = startUpload(file); Assume.assumeTrue( "The Filesystem is unexpectedly supporting concurrent uploads", concurrent); } catch (IOException e) { if (!concurrent) { // this is expected, so end the test - LOG.debug("Expected exception raised on concurrent uploads {}", e); + LOG.debug("Expected exception raised on concurrent uploads", e); return; } else { throw e; } } - final Map partHandles2 = new HashMap<>(); - + Map partHandles2 = new HashMap<>(); assertNotEquals("Upload handles match", upload1, upload2); @@ -686,13 +808,21 @@ public void testConcurrentUploads() throws Throwable { // now upload part 2. complete(file, upload2, partHandles2); // and await the visible length to match - eventually(timeToBecomeConsistentMillis(), 500, - () -> { - FileStatus status = fs.getFileStatus(file); - assertEquals("File length in " + status, - size2, status.getLen()); - }); + eventually(timeToBecomeConsistentMillis(), + () -> verifyFileLength(file, size2), + new LambdaTestUtils.ProportionalRetryInterval( + CONSISTENCY_INTERVAL, + timeToBecomeConsistentMillis())); verifyContents(file, digest2, size2); } + + @Test + public void testPathCapabilities() throws Throwable { + FileSystem fs = getFileSystem(); + Assertions.assertThat(fs.hasPathCapability(getContract().getTestPath(), + CommonPathCapabilities.FS_MULTIPART_UPLOADER)) + .describedAs("fs %s, lacks multipart upload capability", fs) + .isTrue(); + } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractMultipartUploader.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractMultipartUploader.java deleted file mode 100644 index f675ddfa0db82..0000000000000 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractMultipartUploader.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.fs.contract.localfs; - -import org.junit.Assume; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.contract.AbstractContractMultipartUploaderTest; -import org.apache.hadoop.fs.contract.AbstractFSContract; - -/** - * Test the FileSystemMultipartUploader on local file system. - */ -public class TestLocalFSContractMultipartUploader - extends AbstractContractMultipartUploaderTest { - - @Override - public void setup() throws Exception { - Assume.assumeTrue("Skipping until HDFS-13934", false); - super.setup(); - } - - @Override - protected AbstractFSContract createContract(Configuration conf) { - return new LocalFSContract(conf); - } - - /** - * There is no real need to upload any particular size. - * @return 1 kilobyte - */ - @Override - protected int partSizeInBytes() { - return 1024; - } - - @Override - protected boolean finalizeConsumesUploadIdImmediately() { - return true; - } - - @Override - protected boolean supportsConcurrentUploadsToSamePath() { - return true; - } -} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsConfig.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsConfig.java index 136837fc801c4..56f5b2d997dc2 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsConfig.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsConfig.java @@ -39,7 +39,7 @@ public void testInvalidConfig() throws IOException, URISyntaxException { class Foo { } - new InodeTree(conf, null) { + new InodeTree(conf, null, null, false) { @Override protected Foo getTargetFileSystem(final URI uri) { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsOverloadSchemeListStatus.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsOverloadSchemeListStatus.java new file mode 100644 index 0000000000000..300fdd8b333f1 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsOverloadSchemeListStatus.java @@ -0,0 +1,151 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.viewfs; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.FsConstants; +import org.apache.hadoop.fs.LocalFileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.test.GenericTestUtils; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.Test; + +import static org.junit.Assert.*; + +/** + * ViewFsOverloadScheme ListStatus. + */ +public class TestViewFsOverloadSchemeListStatus { + + private static final File TEST_DIR = + GenericTestUtils.getTestDir(TestViewfsFileStatus.class.getSimpleName()); + private Configuration conf; + private static final String FILE_NAME = "file"; + + @Before + public void setUp() { + conf = new Configuration(); + conf.set(String.format("fs.%s.impl", FILE_NAME), + ViewFileSystemOverloadScheme.class.getName()); + conf.set(String + .format(FsConstants.FS_VIEWFS_OVERLOAD_SCHEME_TARGET_FS_IMPL_PATTERN, + FILE_NAME), LocalFileSystem.class.getName()); + FileUtil.fullyDelete(TEST_DIR); + assertTrue(TEST_DIR.mkdirs()); + } + + @After + public void tearDown() throws IOException { + FileUtil.fullyDelete(TEST_DIR); + } + + /** + * Tests the ACL and isDirectory returned from listStatus for directories and + * files. 
+ */ + @Test + public void testListStatusACL() throws IOException, URISyntaxException { + String testfilename = "testFileACL"; + String childDirectoryName = "testDirectoryACL"; + TEST_DIR.mkdirs(); + File infile = new File(TEST_DIR, testfilename); + final byte[] content = "dingos".getBytes(); + + try (FileOutputStream fos = new FileOutputStream(infile)) { + fos.write(content); + } + assertEquals(content.length, infile.length()); + File childDir = new File(TEST_DIR, childDirectoryName); + childDir.mkdirs(); + + ConfigUtil.addLink(conf, "/file", infile.toURI()); + ConfigUtil.addLink(conf, "/dir", childDir.toURI()); + + String fileUriStr = "file:///"; + try (FileSystem vfs = FileSystem.get(new URI(fileUriStr), conf)) { + assertEquals(ViewFileSystemOverloadScheme.class, vfs.getClass()); + FileStatus[] statuses = vfs.listStatus(new Path("/")); + + FileSystem localFs = ((ViewFileSystemOverloadScheme) vfs) + .getRawFileSystem(new Path(fileUriStr), conf); + FileStatus fileStat = localFs.getFileStatus(new Path(infile.getPath())); + FileStatus dirStat = localFs.getFileStatus(new Path(childDir.getPath())); + for (FileStatus status : statuses) { + if (status.getPath().getName().equals(FILE_NAME)) { + assertEquals(fileStat.getPermission(), status.getPermission()); + } else { + assertEquals(dirStat.getPermission(), status.getPermission()); + } + } + + localFs.setPermission(new Path(infile.getPath()), + FsPermission.valueOf("-rwxr--r--")); + localFs.setPermission(new Path(childDir.getPath()), + FsPermission.valueOf("-r--rwxr--")); + + statuses = vfs.listStatus(new Path("/")); + for (FileStatus status : statuses) { + if (status.getPath().getName().equals(FILE_NAME)) { + assertEquals(FsPermission.valueOf("-rwxr--r--"), + status.getPermission()); + assertFalse(status.isDirectory()); + } else { + assertEquals(FsPermission.valueOf("-r--rwxr--"), + status.getPermission()); + assertTrue(status.isDirectory()); + } + } + } + } + + /** + * Tests that ViewFSOverloadScheme should consider initialized fs as fallback + * if there are no mount links configured. 
+ */ + @Test(timeout = 30000) + public void testViewFSOverloadSchemeWithoutAnyMountLinks() throws Exception { + try (FileSystem fs = FileSystem.get(TEST_DIR.toPath().toUri(), conf)) { + ViewFileSystemOverloadScheme vfs = (ViewFileSystemOverloadScheme) fs; + assertEquals(0, vfs.getMountPoints().length); + Path testFallBack = new Path("test", FILE_NAME); + assertTrue(vfs.mkdirs(testFallBack)); + FileStatus[] status = vfs.listStatus(testFallBack.getParent()); + assertEquals(FILE_NAME, status[0].getPath().getName()); + assertEquals(testFallBack.getName(), + vfs.getFileLinkStatus(testFallBack).getPath().getName()); + } + } + + @AfterClass + public static void cleanup() throws IOException { + FileUtil.fullyDelete(TEST_DIR); + } + +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithAuthorityLocalFs.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithAuthorityLocalFs.java index 2e498f2c0a023..fd5de72ed71ad 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithAuthorityLocalFs.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithAuthorityLocalFs.java @@ -48,10 +48,9 @@ public void setUp() throws Exception { fcTarget = FileContext.getLocalFSFileContext(); super.setUp(); // this sets up conf (and fcView which we replace) - // Now create a viewfs using a mount table called "default" - // hence viewfs://default/ + // Now create a viewfs using a mount table using the {MOUNT_TABLE_NAME} schemeWithAuthority = - new URI(FsConstants.VIEWFS_SCHEME, "default", "/", null, null); + new URI(FsConstants.VIEWFS_SCHEME, MOUNT_TABLE_NAME, "/", null, null); fcView = FileContext.getFileContext(schemeWithAuthority, conf); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewfsFileStatus.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewfsFileStatus.java index 0c31c8ed6a901..75557456edc3d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewfsFileStatus.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewfsFileStatus.java @@ -29,10 +29,13 @@ import org.apache.hadoop.fs.FsConstants; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.contract.ContractTestUtils; +import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.io.DataInputBuffer; import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.test.GenericTestUtils; +import org.junit.After; import org.junit.AfterClass; +import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; @@ -48,6 +51,17 @@ public class TestViewfsFileStatus { private static final File TEST_DIR = GenericTestUtils.getTestDir( TestViewfsFileStatus.class.getSimpleName()); + @Before + public void setUp() { + FileUtil.fullyDelete(TEST_DIR); + assertTrue(TEST_DIR.mkdirs()); + } + + @After + public void tearDown() throws IOException { + FileUtil.fullyDelete(TEST_DIR); + } + @Test public void testFileStatusSerialziation() throws IOException, URISyntaxException { @@ -56,38 +70,92 @@ public void testFileStatusSerialziation() File infile = new File(TEST_DIR, testfilename); final byte[] content = "dingos".getBytes(); - FileOutputStream fos = null; - try { - fos = new FileOutputStream(infile); + try (FileOutputStream fos = new FileOutputStream(infile)) { 
fos.write(content); - } finally { - if (fos != null) { - fos.close(); - } } assertEquals((long)content.length, infile.length()); Configuration conf = new Configuration(); ConfigUtil.addLink(conf, "/foo/bar/baz", TEST_DIR.toURI()); - FileSystem vfs = FileSystem.get(FsConstants.VIEWFS_URI, conf); - assertEquals(ViewFileSystem.class, vfs.getClass()); - Path path = new Path("/foo/bar/baz", testfilename); - FileStatus stat = vfs.getFileStatus(path); - assertEquals(content.length, stat.getLen()); - ContractTestUtils.assertNotErasureCoded(vfs, path); - assertTrue(path + " should have erasure coding unset in " + - "FileStatus#toString(): " + stat, - stat.toString().contains("isErasureCoded=false")); - - // check serialization/deserialization - DataOutputBuffer dob = new DataOutputBuffer(); - stat.write(dob); - DataInputBuffer dib = new DataInputBuffer(); - dib.reset(dob.getData(), 0, dob.getLength()); - FileStatus deSer = new FileStatus(); - deSer.readFields(dib); - assertEquals(content.length, deSer.getLen()); - assertFalse(deSer.isErasureCoded()); + try (FileSystem vfs = FileSystem.get(FsConstants.VIEWFS_URI, conf)) { + assertEquals(ViewFileSystem.class, vfs.getClass()); + Path path = new Path("/foo/bar/baz", testfilename); + FileStatus stat = vfs.getFileStatus(path); + assertEquals(content.length, stat.getLen()); + ContractTestUtils.assertNotErasureCoded(vfs, path); + assertTrue(path + " should have erasure coding unset in " + + "FileStatus#toString(): " + stat, + stat.toString().contains("isErasureCoded=false")); + + // check serialization/deserialization + DataOutputBuffer dob = new DataOutputBuffer(); + stat.write(dob); + DataInputBuffer dib = new DataInputBuffer(); + dib.reset(dob.getData(), 0, dob.getLength()); + FileStatus deSer = new FileStatus(); + deSer.readFields(dib); + assertEquals(content.length, deSer.getLen()); + assertFalse(deSer.isErasureCoded()); + } + } + + /** + * Tests the ACL returned from getFileStatus for directories and files. 
+ * @throws IOException + */ + @Test + public void testListStatusACL() throws IOException { + String testfilename = "testFileACL"; + String childDirectoryName = "testDirectoryACL"; + TEST_DIR.mkdirs(); + File infile = new File(TEST_DIR, testfilename); + final byte[] content = "dingos".getBytes(); + + try (FileOutputStream fos = new FileOutputStream(infile)) { + fos.write(content); + } + assertEquals(content.length, infile.length()); + File childDir = new File(TEST_DIR, childDirectoryName); + childDir.mkdirs(); + + Configuration conf = new Configuration(); + ConfigUtil.addLink(conf, "/file", infile.toURI()); + ConfigUtil.addLink(conf, "/dir", childDir.toURI()); + conf.setBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, false); + try (FileSystem vfs = FileSystem.get(FsConstants.VIEWFS_URI, conf)) { + assertEquals(ViewFileSystem.class, vfs.getClass()); + FileStatus[] statuses = vfs.listStatus(new Path("/")); + + FileSystem localFs = FileSystem.getLocal(conf); + FileStatus fileStat = localFs.getFileStatus(new Path(infile.getPath())); + FileStatus dirStat = localFs.getFileStatus(new Path(childDir.getPath())); + + for (FileStatus status : statuses) { + if (status.getPath().getName().equals("file")) { + assertEquals(fileStat.getPermission(), status.getPermission()); + } else { + assertEquals(dirStat.getPermission(), status.getPermission()); + } + } + + localFs.setPermission(new Path(infile.getPath()), + FsPermission.valueOf("-rwxr--r--")); + localFs.setPermission(new Path(childDir.getPath()), + FsPermission.valueOf("-r--rwxr--")); + + statuses = vfs.listStatus(new Path("/")); + for (FileStatus status : statuses) { + if (status.getPath().getName().equals("file")) { + assertEquals(FsPermission.valueOf("-rwxr--r--"), + status.getPermission()); + assertFalse(status.isDirectory()); + } else { + assertEquals(FsPermission.valueOf("-r--rwxr--"), + status.getPermission()); + assertTrue(status.isDirectory()); + } + } + } } // Tests that ViewFileSystem.getFileChecksum calls res.targetFileSystem diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java index 59588a527f46e..05d7974395013 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java @@ -17,7 +17,9 @@ */ package org.apache.hadoop.fs.viewfs; +import java.io.File; import java.io.FileNotFoundException; +import java.io.FileOutputStream; import java.io.IOException; import java.net.URI; import java.security.PrivilegedExceptionAction; @@ -32,6 +34,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.BlockStoragePolicySpi; +import org.apache.hadoop.fs.ContentSummary; +import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystemTestHelper; @@ -57,6 +61,8 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.test.GenericTestUtils; import org.junit.Assume; +import org.junit.Rule; +import org.junit.rules.TemporaryFolder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import static org.apache.hadoop.fs.FileSystemTestHelper.*; @@ -109,6 +115,9 @@ protected FileSystemTestHelper createFileSystemHelper() { return new 
FileSystemTestHelper();
   }

+  @Rule
+  public TemporaryFolder temporaryFolder = new TemporaryFolder();
+
   @Before
   public void setUp() throws Exception {
     initializeTargetTestRoot();
@@ -1369,4 +1378,56 @@ public void testDeleteOnExit() throws Exception {
     viewFs.close();
     assertFalse(fsTarget.exists(realTestPath));
   }
+
+  @Test
+  public void testGetContentSummary() throws IOException {
+    ContentSummary summaryBefore =
+        fsView.getContentSummary(new Path("/internalDir"));
+    String expected = "GET CONTENT SUMMARY";
+    Path filePath =
+        new Path("/internalDir/internalDir2/linkToDir3", "foo");
+
+    try (FSDataOutputStream outputStream = fsView.create(filePath)) {
+      outputStream.write(expected.getBytes());
+    }
+
+    Path newDirPath = new Path("/internalDir/linkToDir2", "bar");
+    fsView.mkdirs(newDirPath);
+
+    ContentSummary summaryAfter =
+        fsView.getContentSummary(new Path("/internalDir"));
+    assertEquals("The file count didn't match",
+        summaryBefore.getFileCount() + 1,
+        summaryAfter.getFileCount());
+    assertEquals("The size didn't match",
+        summaryBefore.getLength() + expected.length(),
+        summaryAfter.getLength());
+    assertEquals("The directory count didn't match",
+        summaryBefore.getDirectoryCount() + 1,
+        summaryAfter.getDirectoryCount());
+  }
+
+  @Test
+  public void testGetContentSummaryWithFileInLocalFS() throws Exception {
+    ContentSummary summaryBefore =
+        fsView.getContentSummary(new Path("/internalDir"));
+    String expected = "GET CONTENT SUMMARY";
+    File localFile = temporaryFolder.newFile("localFile");
+    try (FileOutputStream fos = new FileOutputStream(localFile)) {
+      fos.write(expected.getBytes());
+    }
+    ConfigUtil.addLink(conf,
+        "/internalDir/internalDir2/linkToLocalFile", localFile.toURI());
+
+    try (FileSystem fs = FileSystem.get(FsConstants.VIEWFS_URI, conf)) {
+      ContentSummary summaryAfter =
+          fs.getContentSummary(new Path("/internalDir"));
+      assertEquals("The file count didn't match",
+          summaryBefore.getFileCount() + 1,
+          summaryAfter.getFileCount());
+      assertEquals("The size didn't match",
+          summaryBefore.getLength() + expected.length(),
+          summaryAfter.getLength());
+    }
+  }
 }
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java
index d96cdb172b702..21b0c159e2aae 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java
@@ -56,6 +56,7 @@
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.FileContextTestHelper.fileType;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsConstants;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnresolvedLinkException;
@@ -69,6 +70,7 @@
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.LambdaTestUtils;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -95,6 +97,8 @@
 *

*/ abstract public class ViewFsBaseTest { + protected static final String MOUNT_TABLE_NAME = "mycluster"; + FileContext fcView; // the view file system - the mounts are here FileContext fcTarget; // the target file system - the mount will point here Path targetTestRoot; @@ -128,6 +132,9 @@ public void setUp() throws Exception { // Set up the defaultMT in the config with our mount point links conf = new Configuration(); + conf.set( + Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE_NAME_KEY, + MOUNT_TABLE_NAME); ConfigUtil.addLink(conf, "/targetRoot", targetTestRoot.toUri()); ConfigUtil.addLink(conf, "/user", new Path(targetTestRoot,"user").toUri()); @@ -1001,4 +1008,23 @@ static AbstractFileSystem getMockFs(URI uri) { return mockFs; } } + + @Test + public void testListStatusWithNoGroups() throws Exception { + final UserGroupInformation userUgi = UserGroupInformation + .createUserForTesting("user@HADOOP.COM", new String[] {}); + userUgi.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + URI viewFsUri = new URI( + FsConstants.VIEWFS_SCHEME, MOUNT_TABLE_NAME, "/", null, null); + FileSystem vfs = FileSystem.get(viewFsUri, conf); + LambdaTestUtils.intercept(IOException.class, + "There is no primary group for UGI", () -> vfs + .listStatus(new Path(viewFsUri.toString() + "internalDir"))); + return null; + } + }); + } + } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsTestSetup.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsTestSetup.java index efced73943ed5..b2d7416aa7675 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsTestSetup.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsTestSetup.java @@ -153,7 +153,7 @@ static void addMountLinksToFile(String mountTable, String[] sources, String prefix = new StringBuilder(Constants.CONFIG_VIEWFS_PREFIX).append(".") .append((mountTable == null - ? Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE + ? 
ConfigUtil.getDefaultMountTableName(conf) : mountTable)) .append(".").toString(); out.writeBytes(""); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummyHAService.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummyHAService.java index 6505fbb8224f8..0c2530739fa49 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummyHAService.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummyHAService.java @@ -28,7 +28,7 @@ import org.apache.hadoop.ha.protocolPB.HAServiceProtocolPB; import org.apache.hadoop.ha.protocolPB.HAServiceProtocolServerSideTranslatorPB; import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAServiceProtocolService; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.net.NetUtils; @@ -119,7 +119,7 @@ private InetSocketAddress startAndGetRPCServerAddress(InetSocketAddress serverAd try { RPC.setProtocolEngine(conf, - HAServiceProtocolPB.class, ProtobufRpcEngine.class); + HAServiceProtocolPB.class, ProtobufRpcEngine2.class); HAServiceProtocolServerSideTranslatorPB haServiceProtocolXlator = new HAServiceProtocolServerSideTranslatorPB(new MockHAProtocolImpl()); BlockingService haPbService = HAServiceProtocolService diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestFailoverController.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestFailoverController.java index 791aaad59e990..3f027fa1c598a 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestFailoverController.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestFailoverController.java @@ -177,7 +177,7 @@ public void testFailoverFromFaultyServiceSucceeds() throws Exception { } // svc1 still thinks it's active, that's OK, it was fenced - assertEquals(1, AlwaysSucceedFencer.fenceCalled); + assertEquals(2, AlwaysSucceedFencer.fenceCalled); assertSame(svc1, AlwaysSucceedFencer.fencedSvc); assertEquals(HAServiceState.ACTIVE, svc1.state); assertEquals(HAServiceState.ACTIVE, svc2.state); @@ -201,7 +201,7 @@ public void testFailoverFromFaultyServiceFencingFailure() throws Exception { } assertEquals(1, AlwaysFailFencer.fenceCalled); - assertSame(svc1, AlwaysFailFencer.fencedSvc); + assertSame(svc2, AlwaysFailFencer.fencedSvc); assertEquals(HAServiceState.ACTIVE, svc1.state); assertEquals(HAServiceState.STANDBY, svc2.state); } @@ -223,7 +223,7 @@ public void testFencingFailureDuringFailover() throws Exception { // If fencing was requested and it failed we don't try to make // svc2 active anyway, and we don't failback to svc1. assertEquals(1, AlwaysFailFencer.fenceCalled); - assertSame(svc1, AlwaysFailFencer.fencedSvc); + assertSame(svc2, AlwaysFailFencer.fencedSvc); assertEquals(HAServiceState.STANDBY, svc1.state); assertEquals(HAServiceState.STANDBY, svc2.state); } @@ -344,7 +344,7 @@ public void testWeFenceOnFailbackIfTransitionToActiveFails() throws Exception { // and we didn't force it, so we failed back to svc1 and fenced svc2. // Note svc2 still thinks it's active, that's OK, we fenced it. 
assertEquals(HAServiceState.ACTIVE, svc1.state);
-    assertEquals(1, AlwaysSucceedFencer.fenceCalled);
+    assertEquals(2, AlwaysSucceedFencer.fenceCalled);
     assertSame(svc2, AlwaysSucceedFencer.fencedSvc);
   }
 
@@ -373,7 +373,7 @@ public void testFailureToFenceOnFailbackFailsTheFailback() throws Exception {
     // so we did not failback to svc1, ie it's still standby.
     assertEquals(HAServiceState.STANDBY, svc1.state);
     assertEquals(1, AlwaysFailFencer.fenceCalled);
-    assertSame(svc2, AlwaysFailFencer.fencedSvc);
+    assertSame(svc1, AlwaysFailFencer.fencedSvc);
   }
 
   @Test
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestShellCommandFencer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestShellCommandFencer.java
index 3a2cf052a60a8..fc36b1dd846a4 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestShellCommandFencer.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestShellCommandFencer.java
@@ -163,6 +163,37 @@ public void testTargetAsEnvironment() {
     }
   }
 
+  /**
+   * Test that, if the fencing target has a peer set, the failover can
+   * trigger different commands on the source and destination respectively.
+   */
+  @Test
+  public void testEnvironmentWithPeer() {
+    HAServiceTarget target = new DummyHAService(HAServiceState.ACTIVE,
+        new InetSocketAddress("dummytarget", 1111));
+    HAServiceTarget source = new DummyHAService(HAServiceState.STANDBY,
+        new InetSocketAddress("dummysource", 2222));
+    target.setTransitionTargetHAStatus(HAServiceState.ACTIVE);
+    source.setTransitionTargetHAStatus(HAServiceState.STANDBY);
+    String cmd = "echo $target_host $target_port," +
+        "echo $source_host $source_port";
+    if (!Shell.WINDOWS) {
+      fencer.tryFence(target, cmd);
+      Mockito.verify(ShellCommandFencer.LOG).info(
+          Mockito.contains("echo $ta...rget_port: dummytarget 1111"));
+      fencer.tryFence(source, cmd);
+      Mockito.verify(ShellCommandFencer.LOG).info(
+          Mockito.contains("echo $so...urce_port: dummysource 2222"));
+    } else {
+      fencer.tryFence(target, cmd);
+      Mockito.verify(ShellCommandFencer.LOG).info(
+          Mockito.contains("echo %ta...get_port%: dummytarget 1111"));
+      fencer.tryFence(source, cmd);
+      Mockito.verify(ShellCommandFencer.LOG).info(
+          Mockito.contains("echo %so...urce_port%: dummysource 2222"));
+    }
+  }
+
   /**
    * Test that we properly close off our input to the subprocess
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java
index 63c87830b4529..d4f548c88b9d9 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java
@@ -22,7 +22,7 @@
 import java.net.InetSocketAddress;
 import java.security.NoSuchAlgorithmException;
 
-import com.google.common.base.Supplier;
+import java.util.function.Supplier;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
index e0c87e93a9ac0..ad9617dca79de 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java @@ -62,8 +62,10 @@ import java.util.Arrays; import java.util.Enumeration; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.SortedSet; import java.util.TreeSet; import java.util.concurrent.CountDownLatch; @@ -410,6 +412,13 @@ static void clearMapping() { public List getGroups(String user) throws IOException { return mapping.get(user); } + + @Override + public Set getGroupsSet(String user) throws IOException { + Set result = new HashSet(); + result.addAll(mapping.get(user)); + return result; + } } /** diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServerConfigs.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServerConfigs.java index e88eba342874c..039fae0195730 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServerConfigs.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServerConfigs.java @@ -18,7 +18,7 @@ package org.apache.hadoop.http; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import java.io.File; import java.io.IOException; import java.net.URI; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java index bbb4ec21812e3..e7130d4da8cb3 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java @@ -66,7 +66,7 @@ private static class MyOptions { public int secondsToRun = 15; private int msgSize = 1024; public Class rpcEngine = - ProtobufRpcEngine.class; + ProtobufRpcEngine2.class; private MyOptions(String args[]) { try { @@ -181,7 +181,7 @@ private void processOptions(CommandLine line, Options opts) if (line.hasOption('e')) { String eng = line.getOptionValue('e'); if ("protobuf".equals(eng)) { - rpcEngine = ProtobufRpcEngine.class; + rpcEngine = ProtobufRpcEngine2.class; } else { throw new ParseException("invalid engine: " + eng); } @@ -224,7 +224,7 @@ private Server startServer(MyOptions opts) throws IOException { RPC.Server server; // Get RPC server for server side implementation - if (opts.rpcEngine == ProtobufRpcEngine.class) { + if (opts.rpcEngine == ProtobufRpcEngine2.class) { // Create server side implementation PBServerImpl serverImpl = new PBServerImpl(); BlockingService service = TestProtobufRpcProto @@ -378,7 +378,7 @@ private interface RpcServiceWrapper { private RpcServiceWrapper createRpcClient(MyOptions opts) throws IOException { InetSocketAddress addr = NetUtils.createSocketAddr(opts.host, opts.getPort()); - if (opts.rpcEngine == ProtobufRpcEngine.class) { + if (opts.rpcEngine == ProtobufRpcEngine2.class) { final TestRpcService proxy = RPC.getProxy(TestRpcService.class, 0, addr, conf); return new RpcServiceWrapper() { @Override diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java index 82540637a2004..2b9828bec6954 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java +++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java @@ -98,7 +98,7 @@ import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import com.google.common.primitives.Bytes; import com.google.common.primitives.Ints; import org.slf4j.Logger; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java index 10e23baefef9b..c1b0858697682 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java @@ -45,7 +45,7 @@ public void testPBService() throws Exception { // Set RPC engine to protobuf RPC engine Configuration conf2 = new Configuration(); RPC.setProtocolEngine(conf2, TestRpcService.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); TestRpcService client = RPC.getProxy(TestRpcService.class, 0, addr, conf2); TestProtoBufRpc.testProtoBufRpc(client); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRPCCompatibility.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRPCCompatibility.java index dfb9e934f6055..d813c6b784f5d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRPCCompatibility.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRPCCompatibility.java @@ -25,8 +25,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.ipc.protobuf.TestProtos.EchoRequestProto; -import org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto; import org.apache.hadoop.ipc.protobuf.TestProtos.EmptyRequestProto; import org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto; import org.apache.hadoop.ipc.protobuf.TestProtos.OptRequestProto; @@ -138,7 +136,7 @@ public void testProtocolVersionMismatch() throws IOException, ServiceException { conf = new Configuration(); conf.setInt(CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH, 1024); // Set RPC engine to protobuf RPC engine - RPC.setProtocolEngine(conf, NewRpcService.class, ProtobufRpcEngine.class); + RPC.setProtocolEngine(conf, NewRpcService.class, ProtobufRpcEngine2.class); // Create server side implementation NewServerImpl serverImpl = new NewServerImpl(); @@ -151,7 +149,7 @@ public void testProtocolVersionMismatch() throws IOException, ServiceException { server.start(); - RPC.setProtocolEngine(conf, OldRpcService.class, ProtobufRpcEngine.class); + RPC.setProtocolEngine(conf, OldRpcService.class, ProtobufRpcEngine2.class); OldRpcService proxy = RPC.getProxy(OldRpcService.class, 0, addr, conf); // Verify that exception is thrown if protocolVersion is mismatch between @@ -168,7 +166,8 @@ public void testProtocolVersionMismatch() throws IOException, ServiceException { } // Verify that missing of optional field is still compatible in RPC call. 
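Every engine swap in this migration has the same mechanical shape: the protocol's binding moves from ProtobufRpcEngine to ProtobufRpcEngine2 and nothing else at the call site changes. A condensed sketch of that wiring (the helper class is illustrative, not part of this patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ipc.ProtobufRpcEngine2;
    import org.apache.hadoop.ipc.RPC;

    // Illustrative helper: binds a protobuf-based protocol to the new engine.
    public final class RpcEngineWiring {
      private RpcEngineWiring() {
      }

      public static Configuration bind(Class<?> protocol) {
        Configuration conf = new Configuration();
        // Identical call shape as before; only the engine class changes.
        RPC.setProtocolEngine(conf, protocol, ProtobufRpcEngine2.class);
        return conf;
      }
    }
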
- RPC.setProtocolEngine(conf, NewerRpcService.class, ProtobufRpcEngine.class); + RPC.setProtocolEngine(conf, NewerRpcService.class, + ProtobufRpcEngine2.class); NewerRpcService newProxy = RPC.getProxy(NewerRpcService.class, 0, addr, conf); newProxy.echo(null, emptyRequest); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java index facb8fdd8b191..06c3646310412 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java @@ -94,8 +94,9 @@ public void setUp() throws IOException { // Setup server for both protocols conf.setInt(CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH, 1024); conf.setBoolean(CommonConfigurationKeys.IPC_SERVER_LOG_SLOW_RPC, true); // Set RPC engine to protobuf RPC engine - RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine.class); - RPC.setProtocolEngine(conf, TestRpcService2.class, ProtobufRpcEngine.class); + RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine2.class); + RPC.setProtocolEngine(conf, TestRpcService2.class, + ProtobufRpcEngine2.class); // Create server side implementation PBServerImpl serverImpl = new PBServerImpl(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpcServerHandoff.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpcServerHandoff.java index 32300d4f876e1..922e9192c41c6 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpcServerHandoff.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpcServerHandoff.java @@ -52,7 +52,7 @@ public void test() throws Exception { TestProtobufRpcHandoffProto.newReflectiveBlockingService(serverImpl); RPC.setProtocolEngine(conf, TestProtoBufRpcServerHandoffProtocol.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); RPC.Server server = new RPC.Builder(conf) .setProtocol(TestProtoBufRpcServerHandoffProtocol.class) .setInstance(blockingService) @@ -144,8 +144,8 @@ public static class TestProtoBufRpcServerHandoffServer TestProtos.SleepRequestProto2 request) throws ServiceException { final long startTime = System.currentTimeMillis(); - final ProtobufRpcEngineCallback callback = - ProtobufRpcEngine.Server.registerForDeferredResponse(); + final ProtobufRpcEngineCallback2 callback = + ProtobufRpcEngine2.Server.registerForDeferredResponse(); final long sleepTime = request.getSleepTime(); new Thread() { @Override diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java index 640ca3d2b89ed..cd2433a8aff10 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java @@ -29,6 +29,7 @@ import org.apache.hadoop.ipc.Client.ConnectionId; import org.apache.hadoop.ipc.Server.Call; import org.apache.hadoop.ipc.Server.Connection; +import org.apache.hadoop.ipc.metrics.RpcMetrics; import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcErrorCodeProto; import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcStatusProto; import 
org.apache.hadoop.ipc.protobuf.TestProtos; @@ -81,6 +82,7 @@ import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; @@ -1095,7 +1097,9 @@ public TestRpcService run() { proxy.lockAndSleep(null, newSleepRequest(5)); rpcMetrics = getMetrics(server.getRpcMetrics().name()); - assertGauge("RpcLockWaitTimeAvgTime", 10000.0, rpcMetrics); + assertGauge("RpcLockWaitTimeAvgTime", + (double)(RpcMetrics.TIMEUNIT.convert(10L, TimeUnit.SECONDS)), + rpcMetrics); } finally { if (proxy2 != null) { RPC.stopProxy(proxy2); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCCompatibility.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCCompatibility.java index ffee086fa9801..22fdcbbe14e65 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCCompatibility.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCCompatibility.java @@ -114,19 +114,19 @@ public void setUp() { ProtocolSignature.resetCache(); RPC.setProtocolEngine(conf, - TestProtocol0.class, ProtobufRpcEngine.class); + TestProtocol0.class, ProtobufRpcEngine2.class); RPC.setProtocolEngine(conf, - TestProtocol1.class, ProtobufRpcEngine.class); + TestProtocol1.class, ProtobufRpcEngine2.class); RPC.setProtocolEngine(conf, - TestProtocol2.class, ProtobufRpcEngine.class); + TestProtocol2.class, ProtobufRpcEngine2.class); RPC.setProtocolEngine(conf, - TestProtocol3.class, ProtobufRpcEngine.class); + TestProtocol3.class, ProtobufRpcEngine2.class); RPC.setProtocolEngine(conf, - TestProtocol4.class, ProtobufRpcEngine.class); + TestProtocol4.class, ProtobufRpcEngine2.class); } @After diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCWaitForProxy.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCWaitForProxy.java index d810fe3c5a1e0..90973d2674c01 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCWaitForProxy.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCWaitForProxy.java @@ -44,7 +44,7 @@ public class TestRPCWaitForProxy extends TestRpcBase { @Before public void setupProtocolEngine() { RPC.setProtocolEngine(conf, TestRpcService.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); } /** diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestReuseRpcConnections.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestReuseRpcConnections.java index 2729dc3cd9daa..65558a7980a2d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestReuseRpcConnections.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestReuseRpcConnections.java @@ -26,7 +26,6 @@ import org.apache.hadoop.io.retry.RetryPolicy; import org.apache.hadoop.io.retry.TestConnectionRetryPolicy; import org.apache.hadoop.ipc.Client.ConnectionId; -import org.apache.hadoop.ipc.TestRpcBase.TestRpcService; import org.junit.Before; import org.junit.Test; @@ -129,7 +128,7 @@ private void verifyRetryPolicyReuseConnections( try { proxy1 = getClient(addr, newConf, retryPolicy1); proxy1.ping(null, newEmptyRequest()); - client = ProtobufRpcEngine.getClient(newConf); + 
client = ProtobufRpcEngine2.getClient(newConf); final Set conns = client.getConnectionIds(); assertEquals("number of connections in cache is wrong", 1, conns.size()); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcBase.java index bf24d680dde2e..010935b60960c 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcBase.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcBase.java @@ -70,7 +70,7 @@ public class TestRpcBase { protected void setupConf() { conf = new Configuration(); // Set RPC engine to protobuf RPC engine - RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine.class); + RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine2.class); UserGroupInformation.setConfiguration(conf); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java index 72f73822b6fd0..5f944574656ac 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java @@ -169,7 +169,7 @@ public void setup() { clientFallBackToSimpleAllowed = true; // Set RPC engine to protobuf RPC engine - RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine.class); + RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine2.class); } static String getQOPNames (QualityOfProtection[] qops){ @@ -356,7 +356,7 @@ public void testPerConnectionConf() throws Exception { newConf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, timeouts[0]); proxy1 = getClient(addr, newConf); proxy1.getAuthMethod(null, newEmptyRequest()); - client = ProtobufRpcEngine.getClient(newConf); + client = ProtobufRpcEngine2.getClient(newConf); Set conns = client.getConnectionIds(); assertEquals("number of connections in cache is wrong", 1, conns.size()); // same conf, connection should be re-used diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/MetricsRecords.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/MetricsRecords.java index 5d52cad66bb90..786571441fd1b 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/MetricsRecords.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/MetricsRecords.java @@ -18,8 +18,8 @@ package org.apache.hadoop.metrics2.impl; -import com.google.common.base.Predicate; -import com.google.common.collect.Iterables; +import java.util.function.Predicate; +import java.util.stream.StreamSupport; import org.apache.hadoop.metrics2.AbstractMetric; import org.apache.hadoop.metrics2.MetricsRecord; import org.apache.hadoop.metrics2.MetricsTag; @@ -65,16 +65,22 @@ public static void assertMetricNotNull(MetricsRecord record, resourceLimitMetric); } - private static MetricsTag getFirstTagByName(MetricsRecord record, String name) { - return Iterables.getFirst(Iterables.filter(record.tags(), - new MetricsTagPredicate(name)), null); + private static MetricsTag getFirstTagByName(MetricsRecord record, + String name) { + if (record.tags() == null) { + return null; + } + return record.tags().stream().filter( + new 
MetricsTagPredicate(name)).findFirst().orElse(null); } private static AbstractMetric getFirstMetricByName( MetricsRecord record, String name) { - return Iterables.getFirst( - Iterables.filter(record.metrics(), new AbstractMetricPredicate(name)), - null); + if (record.metrics() == null) { + return null; + } + return StreamSupport.stream(record.metrics().spliterator(), false) + .filter(new AbstractMetricPredicate(name)).findFirst().orElse(null); } private static class MetricsTagPredicate implements Predicate { @@ -86,7 +92,7 @@ public MetricsTagPredicate(String tagName) { } @Override - public boolean apply(MetricsTag input) { + public boolean test(MetricsTag input) { return input.name().equals(tagName); } } @@ -101,7 +107,7 @@ public AbstractMetricPredicate( } @Override - public boolean apply(AbstractMetric input) { + public boolean test(AbstractMetric input) { return input.name().equals(metricName); } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java index 47a3b4cdc092b..8ebcc0bb0cca5 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java @@ -23,9 +23,7 @@ import java.util.*; import java.util.concurrent.*; import java.util.concurrent.atomic.*; - -import javax.annotation.Nullable; - +import java.util.stream.StreamSupport; import org.junit.Test; import org.junit.runner.RunWith; @@ -38,8 +36,7 @@ import static org.junit.Assert.*; import static org.mockito.Mockito.*; -import com.google.common.base.Predicate; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import com.google.common.collect.Iterables; import org.apache.commons.configuration2.SubsetConfiguration; @@ -59,7 +56,6 @@ import org.apache.hadoop.metrics2.lib.MutableCounterLong; import org.apache.hadoop.metrics2.lib.MutableRate; import org.apache.hadoop.metrics2.lib.MutableGaugeLong; -import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -246,13 +242,9 @@ public void run() { for (Thread t : threads) t.join(); assertEquals(0L, ms.droppedPubAll.value()); - assertTrue(StringUtils.join("\n", Arrays.asList(results)), - Iterables.all(Arrays.asList(results), new Predicate() { - @Override - public boolean apply(@Nullable String input) { - return input.equalsIgnoreCase("Passed"); - } - })); + assertTrue(String.join("\n", Arrays.asList(results)), + Arrays.asList(results).stream().allMatch( + input -> input.equalsIgnoreCase("Passed"))); ms.stop(); ms.shutdown(); } @@ -482,14 +474,12 @@ public Object answer(InvocationOnMock invocation) throws Throwable { ms.onTimerEvent(); verify(dataSink, timeout(500).times(2)).putMetrics(r1.capture()); List mr = r1.getAllValues(); - Number qSize = Iterables.find(mr.get(1).metrics(), - new Predicate() { - @Override - public boolean apply(@Nullable AbstractMetric input) { - assert input != null; - return input.name().equals("Sink_slowSinkQsize"); - } - }).value(); + Number qSize = StreamSupport.stream(mr.get(1).metrics().spliterator(), + false).filter( + input -> { + assert input != null; + return input.name().equals("Sink_slowSinkQsize"); + }).findFirst().get().value(); assertEquals(1, qSize); } finally { 
proceedSignal.countDown(); @@ -639,4 +629,25 @@ private static class TestSource2 { private static String getPluginUrlsAsString() { return "file:metrics2-test-plugin.jar"; } + + @Test + public void testMetricSystemRestart() { + MetricsSystemImpl ms = new MetricsSystemImpl("msRestartTestSystem"); + TestSink ts = new TestSink(); + String sinkName = "restartTestSink"; + + try { + ms.start(); + ms.register(sinkName, "", ts); + assertNotNull("no adapter exists for " + sinkName, + ms.getSinkAdapter(sinkName)); + ms.stop(); + + ms.start(); + assertNotNull("no adapter exists for " + sinkName, + ms.getSinkAdapter(sinkName)); + } finally { + ms.stop(); + } + } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableRollingAverages.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableRollingAverages.java index 9bfdd73bee832..ad90c1860514a 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableRollingAverages.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableRollingAverages.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.metrics2.lib; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.annotation.Metric; import org.apache.hadoop.test.GenericTestUtils; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestCompositeGroupMapping.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestCompositeGroupMapping.java index 0a2d42c27329a..1803fb1a05806 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestCompositeGroupMapping.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestCompositeGroupMapping.java @@ -22,7 +22,9 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.HashSet; import java.util.List; +import java.util.Set; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; @@ -87,13 +89,22 @@ public void cacheGroupsRefresh() throws IOException { public void cacheGroupsAdd(List groups) throws IOException { } - + protected List toList(String group) { if (group != null) { return Arrays.asList(new String[] {group}); } return new ArrayList(); } + + protected Set toSet(String group) { + if (group != null) { + Set result = new HashSet<>(); + result.add(group); + return result; + } + return new HashSet(); + } protected void checkTestConf(String expectedValue) { String configValue = getConf().get(PROVIDER_SPECIFIC_CONF_KEY); @@ -106,32 +117,49 @@ protected void checkTestConf(String expectedValue) { private static class UserProvider extends GroupMappingProviderBase { @Override public List getGroups(String user) throws IOException { + return toList(getGroupInternal(user)); + } + + @Override + public Set getGroupsSet(String user) throws IOException { + return toSet(getGroupInternal(user)); + } + + private String getGroupInternal(String user) throws IOException { checkTestConf(PROVIDER_SPECIFIC_CONF_VALUE_FOR_USER); - + String group = null; if (user.equals(john.name)) { group = john.group; } else if (user.equals(jack.name)) { group = jack.group; } - - return toList(group); + return group; } } private static class ClusterProvider extends GroupMappingProviderBase { @Override 
public List getGroups(String user) throws IOException { + return toList(getGroupsInternal(user)); + } + + @Override + public Set getGroupsSet(String user) throws IOException { + return toSet(getGroupsInternal(user)); + } + + private String getGroupsInternal(String user) throws IOException { checkTestConf(PROVIDER_SPECIFIC_CONF_VALUE_FOR_CLUSTER); - + String group = null; if (user.equals(hdfs.name)) { group = hdfs.group; } else if (user.equals(jack.name)) { // jack has another group from clusterProvider group = jack.group2; } - - return toList(group); + return group; + } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java index c86b9ae344195..edd537011c4a8 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java @@ -21,7 +21,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.io.Text; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.ipc.TestRpcBase; @@ -151,7 +151,7 @@ public void testRealUserSetup() throws IOException { configureSuperUserIPAddresses(conf, REAL_USER_SHORT_NAME); // Set RPC engine to protobuf RPC engine RPC.setProtocolEngine(conf, TestRpcService.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); UserGroupInformation.setConfiguration(conf); final Server server = setupTestServer(conf, 5); @@ -181,7 +181,7 @@ public void testRealUserAuthorizationSuccess() throws IOException { getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME), "group1"); RPC.setProtocolEngine(conf, TestRpcService.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); UserGroupInformation.setConfiguration(conf); final Server server = setupTestServer(conf, 5); @@ -215,7 +215,7 @@ public void testRealUserIPAuthorizationFailure() throws IOException { getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME), "group1"); RPC.setProtocolEngine(conf, TestRpcService.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); UserGroupInformation.setConfiguration(conf); final Server server = setupTestServer(conf, 5); @@ -251,7 +251,7 @@ public void testRealUserIPNotSpecified() throws IOException { conf.setStrings(DefaultImpersonationProvider.getTestProvider(). 
getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME), "group1"); RPC.setProtocolEngine(conf, TestRpcService.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); UserGroupInformation.setConfiguration(conf); final Server server = setupTestServer(conf, 2); @@ -286,7 +286,7 @@ public void testRealUserGroupNotSpecified() throws IOException { final Configuration conf = new Configuration(); configureSuperUserIPAddresses(conf, REAL_USER_SHORT_NAME); RPC.setProtocolEngine(conf, TestRpcService.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); UserGroupInformation.setConfiguration(conf); final Server server = setupTestServer(conf, 2); @@ -322,7 +322,7 @@ public void testRealUserGroupAuthorizationFailure() throws IOException { getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME), "group3"); RPC.setProtocolEngine(conf, TestRpcService.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); UserGroupInformation.setConfiguration(conf); final Server server = setupTestServer(conf, 2); @@ -363,7 +363,7 @@ public void testProxyWithToken() throws Exception { TestTokenSecretManager sm = new TestTokenSecretManager(); SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf); RPC.setProtocolEngine(conf, TestRpcService.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); UserGroupInformation.setConfiguration(conf); final Server server = setupTestServer(conf, 5, sm); @@ -411,7 +411,7 @@ public void testTokenBySuperUser() throws Exception { SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, newConf); // Set RPC engine to protobuf RPC engine RPC.setProtocolEngine(newConf, TestRpcService.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); UserGroupInformation.setConfiguration(newConf); final Server server = setupTestServer(newConf, 5, sm); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java index 46e9f92258502..87788691f6d1b 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java @@ -21,9 +21,9 @@ import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.HashSet; import java.util.LinkedHashSet; -import java.util.LinkedList; import java.util.List; import java.util.Set; import java.util.concurrent.CountDownLatch; @@ -34,7 +34,7 @@ import org.junit.Before; import org.junit.Test; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; @@ -75,7 +75,7 @@ public static class FakeGroupMapping extends ShellBasedUnixGroupsMapping { private static volatile CountDownLatch latch = null; @Override - public List getGroups(String user) throws IOException { + public Set getGroupsSet(String user) throws IOException { TESTLOG.info("Getting groups for " + user); delayIfNecessary(); @@ -86,9 +86,14 @@ public List getGroups(String user) throws IOException { } if (blackList.contains(user)) { - return new LinkedList(); + return Collections.emptySet(); } - return new LinkedList(allGroups); + return new LinkedHashSet<>(allGroups); + } + + @Override + public List getGroups(String user) throws IOException { + return new ArrayList<>(getGroupsSet(user)); } /** @@ -129,7 +134,7 @@ public 
static void clearAll() throws IOException { TESTLOG.info("Resetting FakeGroupMapping"); blackList.clear(); allGroups.clear(); - requestCount = 0; + resetRequestCount(); getGroupsDelayMs = 0; throwException = false; latch = null; @@ -197,6 +202,12 @@ public List getGroups(String user) throws IOException { throw new IOException("For test"); } + @Override + public Set getGroupsSet(String user) throws IOException { + requestCount++; + throw new IOException("For test"); + } + public static int getRequestCount() { return requestCount; } @@ -550,7 +561,7 @@ public void testExceptionOnBackgroundRefreshHandled() throws Exception { FakeGroupMapping.clearBlackList(); // We make an initial request to populate the cache - groups.getGroups("me"); + List g1 = groups.getGroups("me"); // add another group groups.cacheGroupsAdd(Arrays.asList("grp3")); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestRuleBasedLdapGroupsMapping.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestRuleBasedLdapGroupsMapping.java index cd04ae09e3148..8862fd7b60984 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestRuleBasedLdapGroupsMapping.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestRuleBasedLdapGroupsMapping.java @@ -24,7 +24,9 @@ import javax.naming.NamingException; import java.util.ArrayList; +import java.util.LinkedHashSet; import java.util.List; +import java.util.Set; import static org.apache.hadoop.security.RuleBasedLdapGroupsMapping .CONVERSION_RULE_KEY; @@ -40,7 +42,7 @@ public class TestRuleBasedLdapGroupsMapping { public void testGetGroupsToUpper() throws NamingException { RuleBasedLdapGroupsMapping groupsMapping = Mockito.spy( new RuleBasedLdapGroupsMapping()); - List groups = new ArrayList<>(); + Set groups = new LinkedHashSet<>(); groups.add("group1"); groups.add("group2"); Mockito.doReturn(groups).when((LdapGroupsMapping) groupsMapping) @@ -61,7 +63,7 @@ public void testGetGroupsToUpper() throws NamingException { public void testGetGroupsToLower() throws NamingException { RuleBasedLdapGroupsMapping groupsMapping = Mockito.spy( new RuleBasedLdapGroupsMapping()); - List groups = new ArrayList<>(); + Set groups = new LinkedHashSet<>(); groups.add("GROUP1"); groups.add("GROUP2"); Mockito.doReturn(groups).when((LdapGroupsMapping) groupsMapping) @@ -82,7 +84,7 @@ public void testGetGroupsToLower() throws NamingException { public void testGetGroupsInvalidRule() throws NamingException { RuleBasedLdapGroupsMapping groupsMapping = Mockito.spy( new RuleBasedLdapGroupsMapping()); - List groups = new ArrayList<>(); + Set groups = new LinkedHashSet<>(); groups.add("group1"); groups.add("GROUP2"); Mockito.doReturn(groups).when((LdapGroupsMapping) groupsMapping) @@ -93,7 +95,7 @@ public void testGetGroupsInvalidRule() throws NamingException { conf.set(CONVERSION_RULE_KEY, "none"); groupsMapping.setConf(conf); - Assert.assertEquals(groups, groupsMapping.getGroups("admin")); + Assert.assertEquals(groups, groupsMapping.getGroupsSet("admin")); } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java index 3fb203ee2b93b..441f552649298 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java +++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java @@ -21,7 +21,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils.LogCapturer; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.junit.BeforeClass; import org.junit.Test; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java index b2e177976b6d5..53973055336f2 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java @@ -24,7 +24,7 @@ import java.util.concurrent.Callable; import java.util.concurrent.ExecutorService; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.curator.RetryPolicy; import org.apache.curator.framework.CuratorFramework; import org.apache.curator.framework.CuratorFrameworkFactory; @@ -59,15 +59,15 @@ public class TestZKDelegationTokenSecretManager { private static final Logger LOG = LoggerFactory.getLogger(TestZKDelegationTokenSecretManager.class); - private static final int TEST_RETRIES = 2; + protected static final int TEST_RETRIES = 2; - private static final int RETRY_COUNT = 5; + protected static final int RETRY_COUNT = 5; - private static final int RETRY_WAIT = 1000; + protected static final int RETRY_WAIT = 1000; - private static final long DAY_IN_SECS = 86400; + protected static final long DAY_IN_SECS = 86400; - private TestingServer zkServer; + protected TestingServer zkServer; @Rule public Timeout globalTimeout = new Timeout(300000); @@ -425,7 +425,7 @@ private void verifyACL(CuratorFramework curatorFramework, // cancelled but.. that would mean having to make an RPC call for every // verification request. // Thus, the eventual consistency tradef-off should be acceptable here... 
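The private-to-protected changes in TestZKDelegationTokenSecretManager (TEST_RETRIES, RETRY_COUNT, RETRY_WAIT, DAY_IN_SECS, zkServer, and verifyTokenFail just below) open the suite up for subclassing. A hedged sketch of what such a subclass could look like; the class name and test are hypothetical:

    import org.junit.Test;

    import static org.junit.Assert.assertEquals;

    // Hypothetical extension: only compiles because the members above are
    // now protected rather than private.
    public class TestZKDelegationTokenSecretManagerExtension
        extends TestZKDelegationTokenSecretManager {

      @Test
      public void testSharedConstantsVisibleToSubclass() {
        // The formerly private constants can now drive subclass-specific
        // retry loops and expiry settings.
        assertEquals(5, RETRY_COUNT);
        assertEquals(1000, RETRY_WAIT);
        assertEquals(86400L, DAY_IN_SECS);
      }
    }
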
- private void verifyTokenFail(DelegationTokenManager tm, + protected void verifyTokenFail(DelegationTokenManager tm, Token token) throws IOException, InterruptedException { verifyTokenFailWithRetry(tm, token, RETRY_COUNT); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java index 9e91634873607..79fc0083a0cc1 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java @@ -32,12 +32,14 @@ import java.lang.reflect.InvocationTargetException; import java.util.Arrays; import java.util.Locale; +import java.util.Objects; import java.util.Random; import java.util.Set; import java.util.Enumeration; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Supplier; import java.util.regex.Pattern; import org.apache.commons.io.IOUtils; @@ -61,7 +63,6 @@ import org.mockito.stubbing.Answer; import com.google.common.base.Joiner; -import com.google.common.base.Supplier; import com.google.common.collect.Sets; /** @@ -88,7 +89,8 @@ public abstract class GenericTestUtils { public static final String DEFAULT_TEST_DATA_PATH = "target/test/data/"; /** - * Error string used in {@link GenericTestUtils#waitFor(Supplier, int, int)}. + * Error string used in + * {@link GenericTestUtils#waitFor(Supplier, long, long)}. */ public static final String ERROR_MISSING_ARGUMENT = "Input supplier interface should be initailized"; @@ -380,9 +382,7 @@ public static void assertExceptionContains(String expectedText, public static void waitFor(final Supplier check, final long checkEveryMillis, final long waitForMillis) throws TimeoutException, InterruptedException { - if (check == null) { - throw new NullPointerException(ERROR_MISSING_ARGUMENT); - } + Objects.requireNonNull(check, ERROR_MISSING_ARGUMENT); if (waitForMillis < checkEveryMillis) { throw new IllegalArgumentException(ERROR_INVALID_ARGUMENT); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java index fb7bd22fedfc9..8489e3d24f368 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java @@ -23,7 +23,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.slf4j.event.Level; import static org.junit.Assert.assertEquals; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/tracing/SetSpanReceiver.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/tracing/SetSpanReceiver.java index d4599b0ecc2d7..d87da0ac301c7 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/tracing/SetSpanReceiver.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/tracing/SetSpanReceiver.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.tracing; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.hadoop.test.GenericTestUtils; import 
org.apache.htrace.core.Span;
 import org.apache.htrace.core.SpanId;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShell.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShell.java
index 578d267114128..c9f398da563e2 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShell.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShell.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.util;
 
-import com.google.common.base.Supplier;
+import java.util.function.Supplier;
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.security.alias.AbstractJavaKeyStoreProvider;
 import org.junit.Assert;
diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
index 9190df27ccc2c..5c18c772a1fe8 100644
--- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
+++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.crypto.key.kms.server;
 
-import com.google.common.base.Supplier;
+import java.util.function.Supplier;
 import com.google.common.cache.LoadingCache;
 import org.apache.curator.test.TestingServer;
 import org.apache.hadoop.conf.Configuration;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/Hdfs.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/Hdfs.java
index 290f2c0e6766f..4162b198fb124 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/Hdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/Hdfs.java
@@ -135,6 +135,14 @@ public FileChecksum getFileChecksum(Path f)
     return dfs.getFileChecksumWithCombineMode(getUriPath(f), Long.MAX_VALUE);
   }
 
+  /**
+   * {@inheritDoc}
+   *
+   * If the given path is a symlink, it will be resolved to its target path,
+   * and the resolved path's FileStatus object is returned. It will not be
+   * represented as a symlink, and the isDirectory API returns true if the
+   * resolved path is a directory, false otherwise.
+   */
   @Override
   public FileStatus getFileStatus(Path f)
       throws IOException, UnresolvedLinkException {
@@ -269,6 +277,20 @@ public HdfsFileStatus getNext() throws IOException {
     }
   }
 
+  /**
+   * {@inheritDoc}
+   *
+   * If any of the immediate children of the given path f is a symlink, the
+   * returned FileStatus object of that child will be represented as a
+   * symlink. It will not be resolved to the target path, and the target
+   * path's FileStatus object will not be returned. The target path is
+   * available via getSymlink on that child's FileStatus. Since it is
+   * represented as a symlink, isDirectory on that FileStatus returns false.
+   *
+   * If you want the FileStatus of the target path for such a child, call
+   * getFileStatus with that child's symlink path. Please see
+   * {@link Hdfs#getFileStatus(Path f)}.
+   */
   @Override
   public FileStatus[] listStatus(Path f)
       throws IOException, UnresolvedLinkException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 72b2113943756..5a6a0f65f12f6 100755
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -332,12 +332,9 @@ public DFSClient(URI nameNodeUri, ClientProtocol rpcNamenode,
         MIN_REPLICATION,
         HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.
             MIN_REPLICATION_DEFAULT);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(
-          "Sets " + HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.
-              MIN_REPLICATION + " to "
-          + dtpReplaceDatanodeOnFailureReplication);
-    }
+    LOG.debug("Sets {} to {}",
+        HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.
+            MIN_REPLICATION, dtpReplaceDatanodeOnFailureReplication);
     this.ugi = UserGroupInformation.getCurrentUser();
 
     this.namenodeUri = nameNodeUri;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
index ba35d51561162..fa1cf34008ffb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
@@ -110,9 +110,7 @@ public class DFSStripedInputStream extends DFSInputStream {
         dataBlkNum, parityBlkNum);
     decoder = CodecUtil.createRawDecoder(dfsClient.getConfiguration(),
         ecPolicy.getCodecName(), coderOptions);
-    if (DFSClient.LOG.isDebugEnabled()) {
-      DFSClient.LOG.debug("Creating an striped input stream for file " + src);
-    }
+    DFSClient.LOG.debug("Creating a striped input stream for file {}", src);
   }
 
   private boolean useDirectBuffer() {
@@ -465,10 +463,8 @@ protected LocatedBlock refreshLocatedBlock(LocatedBlock block)
         break;
       }
     }
-    if (DFSClient.LOG.isDebugEnabled()) {
-      DFSClient.LOG.debug("refreshLocatedBlock for striped blocks, offset="
-          + block.getStartOffset() + ". Obtained block " + lb + ", idx=" + idx);
-    }
+    DFSClient.LOG.debug("refreshLocatedBlock for striped blocks, offset={}."
+        + " Obtained block {}, idx={}", block.getStartOffset(), lb, idx);
     return StripedBlockUtil.constructInternalBlock(
         lsb, i, cellSize, dataBlkNum, idx);
   }
@@ -526,7 +522,7 @@ protected void reportLostBlock(LocatedBlock lostBlock,
       if (!warnedNodes.containsAll(dnUUIDs)) {
         DFSClient.LOG.warn(Arrays.toString(nodes) + " are unavailable and " +
             "all striping blocks on them are lost. " +
-            "IgnoredNodes = " + ignoredNodes);
+            "IgnoredNodes = {}", ignoredNodes);
         warnedNodes.addAll(dnUUIDs);
       }
     } else {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index aad4a00bdeb35..4222478f976de 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -503,8 +503,14 @@ private void allocateNewBlock() throws IOException {
     LOG.debug("Allocating new block group. The previous block group: "
         + prevBlockGroup);
-    final LocatedBlock lb = addBlock(excludedNodes, dfsClient, src,
-        prevBlockGroup, fileId, favoredNodes, getAddBlockFlags());
+    final LocatedBlock lb;
+    try {
+      lb = addBlock(excludedNodes, dfsClient, src,
+          prevBlockGroup, fileId, favoredNodes, getAddBlockFlags());
+    } catch (IOException ioe) {
+      closeAllStreamers();
+      throw ioe;
+    }
     assert lb.isStriped();
     // assign the new block to the current block group
     currentBlockGroup = lb.getBlock();
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index b4a932ef142f8..450862b777078 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -52,6 +52,7 @@
 import org.apache.hadoop.fs.GlobalStorageStatistics.StorageStatisticsProvider;
 import org.apache.hadoop.fs.InvalidPathHandleException;
 import org.apache.hadoop.fs.PartialListing;
+import org.apache.hadoop.fs.MultipartUploaderBuilder;
 import org.apache.hadoop.fs.PathHandle;
 import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Options;
@@ -66,6 +67,7 @@
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.fs.XAttrSetFlag;
+import org.apache.hadoop.fs.impl.FileSystemMultipartUploaderBuilder;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
@@ -1143,10 +1145,21 @@ private FileStatus[] listStatusInternal(Path p) throws IOException {
   /**
    * List all the entries of a directory
    *
-   * Note that this operation is not atomic for a large directory.
-   * The entries of a directory may be fetched from NameNode multiple times.
-   * It only guarantees that each name occurs once if a directory
-   * undergoes changes between the calls.
+   * Note that this operation is not atomic for a large directory. The entries
+   * of a directory may be fetched from NameNode multiple times. It only
+   * guarantees that each name occurs once if a directory undergoes changes
+   * between the calls.
+   *
+   * If any of the immediate children of the given path f is a symlink, the
+   * returned FileStatus object of that child will be represented as a
+   * symlink. It will not be resolved to the target path, and the target
+   * path's FileStatus object will not be returned. The target path is
+   * available via getSymlink on that child's FileStatus. Since it is
+   * represented as a symlink, isDirectory on that FileStatus returns false.
+   *
+   * If you want the FileStatus of the target path for such a child, call
+   * getFileStatus with that child's symlink path. Please see
+   * {@link DistributedFileSystem#getFileStatus(Path f)}.
    */
   @Override
   public FileStatus[] listStatus(Path p) throws IOException {
@@ -1712,6 +1725,12 @@ public FsServerDefaults getServerDefaults() throws IOException {
 
   /**
    * Returns the stat information about the file.
+   *
+   * If the given path is a symlink, it will be resolved to its target path,
+   * and the resolved path's FileStatus object is returned. It will not be
+   * represented as a symlink, and the isDirectory API returns true if the
+   * resolved path is a directory, false otherwise.
+   *
    * @throws FileNotFoundException if the file does not exist.
    */
   @Override
@@ -3598,4 +3617,10 @@ public boolean hasPathCapability(final Path path, final String capability)
     return super.hasPathCapability(p, capability);
   }
+
+  @Override
+  public MultipartUploaderBuilder createMultipartUploader(final Path basePath)
+      throws IOException {
+    return new FileSystemMultipartUploaderBuilder(this, basePath);
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java
index c640b39b6f488..68577aad82501 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java
@@ -56,7 +56,7 @@
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.io.retry.RetryProxy;
 import org.apache.hadoop.io.retry.RetryUtils;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.ProtobufRpcEngine2;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
@@ -355,7 +355,7 @@ public static ClientProtocol createProxyWithAlignmentContext(
       AlignmentContext alignmentContext)
       throws IOException {
     RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class,
-        ProtobufRpcEngine.class);
+        ProtobufRpcEngine2.class);
 
     final RetryPolicy defaultPolicy =
         RetryUtils.getDefaultRetryPolicy(
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StripeReader.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StripeReader.java
index 8fd38bdb3b795..96cbb73a3896d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StripeReader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StripeReader.java
@@ -353,10 +353,8 @@ void readStripe() throws IOException {
       StripingChunkReadResult r = StripedBlockUtil
          .getNextCompletedStripedRead(service, futures, 0);
       dfsStripedInputStream.updateReadStats(r.getReadStats());
-      if (DFSClient.LOG.isDebugEnabled()) {
-        DFSClient.LOG.debug("Read task returned: " + r + ", for stripe "
-            + alignedStripe);
-      }
+      DFSClient.LOG.debug("Read task returned: {}, for stripe {}",
+          r, alignedStripe);
       StripingChunk returnedChunk = alignedStripe.chunks[r.index];
       Preconditions.checkNotNull(returnedChunk);
       Preconditions.checkState(returnedChunk.state == StripingChunk.PENDING);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/DfsPathCapabilities.java
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/DfsPathCapabilities.java index 6cad69a46c4e8..30e7e00653bcc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/DfsPathCapabilities.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/DfsPathCapabilities.java @@ -47,6 +47,7 @@ public static Optional hasPathCapability(final Path path, case CommonPathCapabilities.FS_CHECKSUMS: case CommonPathCapabilities.FS_CONCAT: case CommonPathCapabilities.FS_LIST_CORRUPT_FILE_BLOCKS: + case CommonPathCapabilities.FS_MULTIPART_UPLOADER: case CommonPathCapabilities.FS_PATHHANDLES: case CommonPathCapabilities.FS_PERMISSIONS: case CommonPathCapabilities.FS_SNAPSHOTS: diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java index ab61e504502d1..a025b9bad2e69 100755 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java @@ -105,6 +105,8 @@ public byte value() { public static final String DOT_SNAPSHOT_DIR = ".snapshot"; public static final String SEPARATOR_DOT_SNAPSHOT_DIR = Path.SEPARATOR + DOT_SNAPSHOT_DIR; + public static final String DOT_SNAPSHOT_DIR_SEPARATOR = + DOT_SNAPSHOT_DIR + Path.SEPARATOR; public static final String SEPARATOR_DOT_SNAPSHOT_DIR_SEPARATOR = Path.SEPARATOR + DOT_SNAPSHOT_DIR + Path.SEPARATOR; public final static String DOT_RESERVED_STRING = ".reserved"; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java index 4028b0e8fb245..47234e8b65d78 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java @@ -68,7 +68,7 @@ import org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus; import org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus.Result; import org.apache.hadoop.ipc.ProtobufHelper; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.ProtocolMetaInterface; import org.apache.hadoop.ipc.ProtocolTranslator; import org.apache.hadoop.ipc.RPC; @@ -181,7 +181,7 @@ static ClientDatanodeProtocolPB createClientDatanodeProtocolProxy( InetSocketAddress addr, UserGroupInformation ticket, Configuration conf, SocketFactory factory, int socketTimeout) throws IOException { RPC.setProtocolEngine(conf, ClientDatanodeProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); return RPC.getProxy(ClientDatanodeProtocolPB.class, RPC.getProtocolVersion(ClientDatanodeProtocolPB.class), addr, ticket, conf, factory, socketTimeout); diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java index d44469211bf91..7e41460ca4c63 100644 
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java @@ -237,7 +237,7 @@ import org.apache.hadoop.io.retry.AsyncCallHandler; import org.apache.hadoop.ipc.Client; import org.apache.hadoop.ipc.ProtobufHelper; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.ProtocolMetaInterface; import org.apache.hadoop.ipc.ProtocolTranslator; import org.apache.hadoop.ipc.RPC; @@ -456,7 +456,7 @@ public void setPermission(String src, FsPermission permission) private void setAsyncReturnValue() { final AsyncGet asyncReturnMessage - = ProtobufRpcEngine.getAsyncReturnMessage(); + = ProtobufRpcEngine2.getAsyncReturnMessage(); final AsyncGet asyncGet = new AsyncGet() { @Override @@ -1570,7 +1570,7 @@ public AclStatus getAclStatus(String src) throws IOException { if (Client.isAsynchronousMode()) { rpcProxy.getAclStatus(null, req); final AsyncGet asyncReturnMessage - = ProtobufRpcEngine.getAsyncReturnMessage(); + = ProtobufRpcEngine2.getAsyncReturnMessage(); final AsyncGet asyncGet = new AsyncGet() { @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ReconfigurationProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ReconfigurationProtocolTranslatorPB.java index 5165887ece5f3..ce8a89b84acce 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ReconfigurationProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ReconfigurationProtocolTranslatorPB.java @@ -34,7 +34,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationRequestProto; import org.apache.hadoop.ipc.ProtobufHelper; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.ProtocolMetaInterface; import org.apache.hadoop.ipc.ProtocolTranslator; import org.apache.hadoop.ipc.RPC; @@ -84,7 +84,7 @@ static ReconfigurationProtocolPB createReconfigurationProtocolProxy( InetSocketAddress addr, UserGroupInformation ticket, Configuration conf, SocketFactory factory, int socketTimeout) throws IOException { RPC.setProtocolEngine(conf, ReconfigurationProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); return RPC.getProxy(ReconfigurationProtocolPB.class, RPC.getProtocolVersion(ReconfigurationProtocolPB.class), addr, ticket, conf, factory, socketTimeout); diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java index 4caa0e91fbb54..25e7f7373226b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java @@ -76,10 +76,12 @@ import org.apache.hadoop.fs.FsServerDefaults; import org.apache.hadoop.fs.GlobalStorageStatistics; import 
org.apache.hadoop.fs.GlobalStorageStatistics.StorageStatisticsProvider; +import org.apache.hadoop.fs.MultipartUploaderBuilder; import org.apache.hadoop.fs.QuotaUsage; import org.apache.hadoop.fs.PathCapabilities; import org.apache.hadoop.fs.StorageStatistics; import org.apache.hadoop.fs.StorageType; +import org.apache.hadoop.fs.impl.FileSystemMultipartUploaderBuilder; import org.apache.hadoop.fs.permission.FsCreateModes; import org.apache.hadoop.hdfs.DFSOpsCountStatistics; import org.apache.hadoop.hdfs.DFSOpsCountStatistics.OpType; @@ -152,6 +154,7 @@ public class WebHdfsFileSystem extends FileSystem + "/v" + VERSION; public static final String EZ_HEADER = "X-Hadoop-Accept-EZ"; public static final String FEFINFO_HEADER = "X-Hadoop-feInfo"; + public static final String DFS_HTTP_POLICY_KEY = "dfs.http.policy"; /** * Default connection factory may be overridden in tests to use smaller @@ -181,6 +184,7 @@ public class WebHdfsFileSystem extends FileSystem private DFSOpsCountStatistics storageStatistics; private KeyProvider testProvider; + private boolean isTLSKrb; /** * Return the protocol scheme for the FileSystem. @@ -242,6 +246,7 @@ public synchronized void initialize(URI uri, Configuration conf .newDefaultURLConnectionFactory(connectTimeout, readTimeout, conf); } + this.isTLSKrb = "HTTPS_ONLY".equals(conf.get(DFS_HTTP_POLICY_KEY)); ugi = UserGroupInformation.getCurrentUser(); this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority()); @@ -699,6 +704,11 @@ protected HttpURLConnection connect(URL url) throws IOException { //redirect hostname and port redirectHost = null; + if (url.getProtocol().equals("http") && + UserGroupInformation.isSecurityEnabled() && + isTLSKrb) { + throw new IOException("Access denied: dfs.http.policy is HTTPS_ONLY."); + } // resolve redirects for a DN operation unless already resolved if (op.getRedirect() && !redirected) { @@ -2117,6 +2127,12 @@ public boolean hasPathCapability(final Path path, final String capability) return super.hasPathCapability(p, capability); } + @Override + public MultipartUploaderBuilder createMultipartUploader(final Path basePath) + throws IOException { + return new FileSystemMultipartUploaderBuilder(this, basePath); + } + /** * This class is used for opening, reading, and seeking files while using the * WebHdfsFileSystem. 
This class will invoke the retry policy when performing
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/client/impl/TestLeaseRenewer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/client/impl/TestLeaseRenewer.java
index f73ea6d24712f..1ffec85e02b8a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/client/impl/TestLeaseRenewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/client/impl/TestLeaseRenewer.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.client.impl;
 
-import com.google.common.base.Supplier;
+import java.util.function.Supplier;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSOutputStream;
 import org.apache.hadoop.security.UserGroupInformation;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
index 17be09ea1f331..bae9dd19b4053 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
@@ -96,6 +96,7 @@
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 /**
  * Main class of HttpFSServer server.
@@ -288,7 +289,7 @@ public InputStream run() throws Exception {
     case INSTRUMENTATION: {
       enforceRootPath(op.value(), path);
       Groups groups = HttpFSServerWebApp.get().get(Groups.class);
-      List<String> userGroups = groups.getGroups(user.getShortUserName());
+      Set<String> userGroups = groups.getGroupsSet(user.getShortUserName());
       if (!userGroups.contains(HttpFSServerWebApp.get().getAdminGroup())) {
         throw new AccessControlException(
             "User not in HttpFSServer admin group");
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/Groups.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/Groups.java
index 90733f9cdc7e4..2cc942f8e03e5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/Groups.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/Groups.java
@@ -22,10 +22,13 @@
 
 import java.io.IOException;
 import java.util.List;
+import java.util.Set;
 
 @InterfaceAudience.Private
 public interface Groups {
 
   public List<String> getGroups(String user) throws IOException;
 
+  Set<String> getGroupsSet(String user) throws IOException;
+
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/security/GroupsService.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/security/GroupsService.java
index 560a3ccf6ebe4..8de0630c9b11b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/security/GroupsService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/security/GroupsService.java
@@ -27,6 +27,7 @@
 
 import java.io.IOException;
 import java.util.List;
+import java.util.Set;
 
 @InterfaceAudience.Private
 public class GroupsService extends BaseService implements Groups {
@@ -50,9 +51,18 @@ public Class getInterface() {
     return Groups.class;
   }
 
+  /**
+   * @deprecated use {@link #getGroupsSet(String user)}
+   */
+  @Deprecated
   @Override
   public List<String> getGroups(String user) throws IOException {
     return hGroups.getGroups(user);
   }
 
+  @Override
+  public Set<String> getGroupsSet(String user) throws IOException {
+    return hGroups.getGroupsSet(user);
+  }
+
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/index.md b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/index.md
index 6eef9e7d30e99..665aad52c7b5b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/index.md
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/index.md
@@ -42,7 +42,7 @@ HttpFS HTTP web-service API calls are HTTP REST calls that map to a HDFS file sy
 
 * `$ curl 'http://httpfs-host:14000/webhdfs/v1/user/foo?op=GETTRASHROOT&user.name=foo'` returns the path `/user/foo/.Trash`, if `/` is an encrypted zone, returns the path `/.Trash/foo`. See [more details](../hadoop-project-dist/hadoop-hdfs/TransparentEncryption.html#Rename_and_Trash_considerations) about trash path in an encrypted zone.
 
-* `$ curl -X POST 'http://httpfs-host:14000/webhdfs/v1/user/foo/bar?op=MKDIRS&user.name=foo'` creates the HDFS `/user/foo/bar` directory.
+* `$ curl -X PUT 'http://httpfs-host:14000/webhdfs/v1/user/foo/bar?op=MKDIRS&user.name=foo'` creates the HDFS `/user/foo/bar` directory.
 
 User and Developer Documentation
 --------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
index a5bbb92f2153b..6739393924e5e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
@@ -60,9 +60,11 @@
 import java.text.MessageFormat;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.conf.Configuration;
@@ -170,6 +172,11 @@ public List<String> getGroups(String user) throws IOException {
       return Arrays.asList(HadoopUsersConfTestHelper.getHadoopUserGroups(user));
     }
 
+    @Override
+    public Set<String> getGroupsSet(String user) throws IOException {
+      return new HashSet<>(getGroups(user));
+    }
+
   }
 
   private Configuration createHttpFSConf(boolean addDelegationTokenAuthHandler,
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/DummyGroupMapping.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/DummyGroupMapping.java
index 9ef786db2d3c0..2693deff7d93a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/DummyGroupMapping.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/DummyGroupMapping.java
@@ -21,7 +21,9 @@
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
+import java.util.Set;
 
+import com.google.common.collect.Sets;
 import org.apache.hadoop.security.GroupMappingServiceProvider;
 import org.apache.hadoop.test.HadoopUsersConfTestHelper;
 
@@ -47,4 +49,17 @@ public void cacheGroupsRefresh() throws IOException {
 
   @Override
   public void cacheGroupsAdd(List<String> groups) throws IOException {
   }
+
+  @Override
+  public Set<String> getGroupsSet(String user) throws IOException {
+    if (user.equals("root")) {
+      return Sets.newHashSet("admin");
+    } else if
(user.equals("nobody")) { + return Sets.newHashSet("nobody"); + } else { + String[] groups = HadoopUsersConfTestHelper.getHadoopUserGroups(user); + return (groups != null) ? Sets.newHashSet(groups) : + Collections.emptySet(); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt index 626c49bf192c6..6e233fd3991d6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt @@ -18,6 +18,8 @@ cmake_minimum_required(VERSION 3.1 FATAL_ERROR) +project(hadoop_hdfs_native_client) + enable_testing() list(APPEND CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/../../../hadoop-common-project/hadoop-common) diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMakeLists.txt index 411320ad771e7..6a2f378d0a4bb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMakeLists.txt +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMakeLists.txt @@ -28,6 +28,8 @@ project (libhdfspp) cmake_minimum_required(VERSION 2.8) +find_package (Boost 1.72.0 REQUIRED) + enable_testing() include (CTest) @@ -220,7 +222,7 @@ include_directories( include_directories( SYSTEM ${PROJECT_BINARY_DIR}/lib/proto - third_party/asio-1.10.2/include + ${Boost_INCLUDE_DIRS} third_party/rapidxml-1.13 third_party/gmock-1.7.0 third_party/tr2 diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/ioservice.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/ioservice.h index a6ec97ad4913f..b0bac5dd7ece0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/ioservice.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/ioservice.h @@ -61,10 +61,7 @@ #include #include -// forward decl -namespace asio { - class io_service; -} +#include namespace hdfs { @@ -133,7 +130,7 @@ class IoService : public std::enable_shared_from_this * Access underlying io_service object. Only to be used in asio library calls. * After HDFS-11884 is complete only tests should need direct access to the asio::io_service. 
 **/
-  virtual asio::io_service& GetRaw() = 0;
+  virtual boost::asio::io_service& GetRaw() = 0;
 };
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/CMakeLists.txt
index 1ab04d36689c0..87779e7f8ae81 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/CMakeLists.txt
@@ -19,6 +19,7 @@
 if(NEED_LINK_DL)
    set(LIB_DL dl)
 endif()
+include_directories(${Boost_INCLUDE_DIRS} ../../include)
 add_library(common_obj OBJECT status.cc sasl_digest_md5.cc ioservice_impl.cc options.cc configuration.cc configuration_loader.cc hdfs_configuration.cc uri.cc util.cc retry_policy.cc cancel_tracker.cc logging.cc libhdfs_events_impl.cc auth_info.cc namenode_info.cc statinfo.cc fsinfo.cc content_summary.cc locks.cc config_parser.cc)
 add_library(common $<TARGET_OBJECTS:common_obj> $<TARGET_OBJECTS:uriparser2_obj>)
 target_link_libraries(common ${LIB_DL})
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/async_stream.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/async_stream.h
index efe2e1c5db376..ad9ad64be6f04 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/async_stream.h
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/async_stream.h
@@ -19,15 +19,17 @@
 #ifndef LIB_COMMON_ASYNC_STREAM_H_
 #define LIB_COMMON_ASYNC_STREAM_H_
 
-#include <asio/buffer.hpp>
-#include <asio/error_code.hpp>
+#include <boost/asio/buffer.hpp>
+#include <boost/asio/system_executor.hpp>
+#include <boost/system/error_code.hpp>
+
 #include <functional>
 
 namespace hdfs {
 
 // Contiguous buffer types
-typedef asio::mutable_buffers_1 MutableBuffer;
-typedef asio::const_buffers_1 ConstBuffer;
+typedef boost::asio::mutable_buffers_1 MutableBuffer;
+typedef boost::asio::const_buffers_1 ConstBuffer;
 
 /*
  * asio-compatible stream implementation.
@@ -38,13 +40,23 @@ typedef asio::const_buffers_1 ConstBuffer;
  */
 class AsyncStream {
 public:
+  using executor_type = boost::asio::system_executor;
+
   virtual void async_read_some(const MutableBuffer &buf,
-        std::function<void (const asio::error_code &, std::size_t) > handler) = 0;
+        std::function<void (const boost::system::error_code &, std::size_t) > handler) = 0;
 
   virtual void async_write_some(const ConstBuffer &buf,
-        std::function<void (const asio::error_code &, std::size_t) > handler) = 0;
+        std::function<void (const boost::system::error_code &, std::size_t) > handler) = 0;
+
+  executor_type get_executor() {
+    return executor_;
+  }
+
+private:
+  executor_type executor_;
+
 };
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/continuation/asio.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/continuation/asio.h
index 0215176e6d462..f2a3722ec182c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/continuation/asio.h
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/continuation/asio.h
@@ -21,7 +21,10 @@
 #include "continuation.h"
 #include "common/util.h"
 #include "hdfspp/status.h"
-#include <asio/write.hpp>
+
+#include <boost/asio/write.hpp>
+#include <boost/system/error_code.hpp>
+
 #include <memory>
 
 namespace hdfs {
@@ -37,8 +40,8 @@ class WriteContinuation : public Continuation {
 
   virtual void Run(const Next &next) override {
     auto handler =
-        [next](const asio::error_code &ec, size_t) { next(ToStatus(ec)); };
-    asio::async_write(*stream_, buffer_, handler);
+        [next](const boost::system::error_code &ec, size_t) { next(ToStatus(ec)); };
+    boost::asio::async_write(*stream_, buffer_, handler);
   }
 
 private:
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/continuation/protobuf.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/continuation/protobuf.h
index 21e063ed0e08c..e5be85a5005b5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/continuation/protobuf.h
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/continuation/protobuf.h
@@ -20,8 +20,10 @@
 
 #include "common/util.h"
 
-#include <asio/read.hpp>
-
+#include <boost/asio/read.hpp>
+#include <boost/asio/write.hpp>
+#include <boost/asio/buffer.hpp>
+#include <boost/system/error_code.hpp>
 #include <google/protobuf/message_lite.h>
 #include <google/protobuf/io/coded_stream.h>
 #include <google/protobuf/io/zero_copy_stream_impl_lite.h>
@@ -39,7 +41,7 @@ struct ReadDelimitedPBMessageContinuation : public Continuation {
 
   virtual void Run(const Next &next) override {
     namespace pbio = google::protobuf::io;
-    auto handler = [this, next](const asio::error_code &ec, size_t) {
+    auto handler = [this, next](const boost::system::error_code &ec, size_t) {
       Status status;
       if (ec) {
         status = ToStatus(ec);
@@ -57,15 +59,15 @@ struct ReadDelimitedPBMessageContinuation : public Continuation {
       }
       next(status);
     };
-    asio::async_read(*stream_,
-        asio::buffer(buf_),
+    boost::asio::async_read(*stream_,
+        boost::asio::buffer(buf_),
         std::bind(&ReadDelimitedPBMessageContinuation::CompletionHandler, this,
                   std::placeholders::_1, std::placeholders::_2),
         handler);
   }
 
 private:
-  size_t CompletionHandler(const asio::error_code &ec, size_t transferred) {
+  size_t CompletionHandler(const boost::system::error_code &ec, size_t transferred) {
     if (ec) {
       return 0;
     }
@@ -103,7 +105,7 @@ struct WriteDelimitedPBMessageContinuation : Continuation {
       return;
     }
 
-    asio::async_write(*stream_, asio::buffer(buf_), [next](const asio::error_code &ec, size_t) { next(ToStatus(ec)); } );
+    boost::asio::async_write(*stream_, boost::asio::buffer(buf_), [next](const boost::system::error_code &ec, size_t) { next(ToStatus(ec)); } );
   }
 
 private:
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/fsinfo.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/fsinfo.cc
index
9f350a8f2cc76..f8f5923832711 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/fsinfo.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/fsinfo.cc @@ -17,6 +17,8 @@ */ #include + +#include #include #include diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/ioservice_impl.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/ioservice_impl.cc index de081ed148f54..17a4474a43319 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/ioservice_impl.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/ioservice_impl.cc @@ -125,7 +125,7 @@ void IoServiceImpl::Run() { // from escaping this library and crashing the process. // As recommended in http://www.boost.org/doc/libs/1_39_0/doc/html/boost_asio/reference/io_service.html#boost_asio.reference.io_service.effect_of_exceptions_thrown_from_handlers - asio::io_service::work work(io_service_); + boost::asio::io_service::work work(io_service_); while(true) { try @@ -145,7 +145,7 @@ void IoServiceImpl::Stop() { io_service_.stop(); } -asio::io_service& IoServiceImpl::GetRaw() { +boost::asio::io_service& IoServiceImpl::GetRaw() { return io_service_; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/ioservice_impl.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/ioservice_impl.h index a29985cf88a8b..2d627aabf23f2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/ioservice_impl.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/ioservice_impl.h @@ -21,7 +21,7 @@ #include "hdfspp/ioservice.h" -#include +#include #include "common/new_delete.h" #include @@ -45,7 +45,7 @@ class IoServiceImpl : public IoService { void PostTask(std::function asyncTask) override; void Run() override; void Stop() override; - asio::io_service& GetRaw() override; + boost::asio::io_service& GetRaw() override; // Add a single worker thread, in the common case try to avoid this in favor // of Init[Default]Workers. 
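// A minimal, self-contained sketch (not from the patch) of the run-loop
// pattern the IoServiceImpl::Run hunk above ports to boost::asio: a work
// guard keeps run() from returning while the handler queue is empty, and
// handler exceptions are caught so they cannot escape the library, as the
// boost documentation linked in that comment recommends. The function name
// here is illustrative only.
#include <boost/asio/io_service.hpp>
#include <exception>
#include <iostream>

void RunIoServiceWorker(boost::asio::io_service &io_service) {
  // Keep run() alive even when no handlers are currently queued.
  boost::asio::io_service::work work(io_service);
  while (true) {
    try {
      io_service.run();  // returns only once the service is stopped
      break;
    } catch (const std::exception &e) {
      std::cerr << "handler threw: " << e.what() << '\n';
    } catch (...) {
      std::cerr << "handler threw an unknown exception\n";
    }
  }
}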
Public for use by tests and rare cases where a @@ -57,7 +57,7 @@ class IoServiceImpl : public IoService { private: std::mutex state_lock_; - ::asio::io_service io_service_; + boost::asio::io_service io_service_; // For doing logging + resource manager updates on thread start/exit void ThreadStartHook(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/logging.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/logging.cc index 94bce83fd1e93..54048fb1201b8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/logging.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/logging.cc @@ -136,7 +136,7 @@ LogMessage& LogMessage::operator<<(const std::string& str) { return *this; } -LogMessage& LogMessage::operator<<(const ::asio::ip::tcp::endpoint& endpoint) { +LogMessage& LogMessage::operator<<(const boost::asio::ip::tcp::endpoint& endpoint) { msg_buffer_ << endpoint; return *this; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/logging.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/logging.h index 4e66a93061774..8935287fe0108 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/logging.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/logging.h @@ -19,7 +19,7 @@ #ifndef LIB_COMMON_LOGGING_H_ #define LIB_COMMON_LOGGING_H_ -#include +#include #include "hdfspp/log.h" @@ -193,7 +193,7 @@ class LogMessage { LogMessage& operator<<(void *); //asio types - LogMessage& operator<<(const ::asio::ip::tcp::endpoint& endpoint); + LogMessage& operator<<(const boost::asio::ip::tcp::endpoint& endpoint); //thread and mutex types LogMessage& operator<<(const std::thread::id& tid); diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/namenode_info.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/namenode_info.cc index a04daf1a8b7f6..92054fce07e31 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/namenode_info.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/namenode_info.cc @@ -70,7 +70,7 @@ bool ResolveInPlace(std::shared_ptr ioservice, ResolvedNamenodeInfo & return true; } -typedef std::vector endpoint_vector; +typedef std::vector endpoint_vector; // RAII wrapper class ScopedResolver { @@ -78,8 +78,8 @@ class ScopedResolver { std::shared_ptr io_service_; std::string host_; std::string port_; - ::asio::ip::tcp::resolver::query query_; - ::asio::ip::tcp::resolver resolver_; + boost::asio::ip::tcp::resolver::query query_; + boost::asio::ip::tcp::resolver resolver_; endpoint_vector endpoints_; // Caller blocks on access if resolution isn't finished @@ -111,9 +111,9 @@ class ScopedResolver { std::shared_ptr> shared_result = result_status_; // Callback to pull a copy of endpoints out of resolver and set promise - auto callback = [this, shared_result](const asio::error_code &ec, ::asio::ip::tcp::resolver::iterator out) { + auto callback = [this, shared_result](const boost::system::error_code &ec, boost::asio::ip::tcp::resolver::iterator out) { if(!ec) { - std::copy(out, ::asio::ip::tcp::resolver::iterator(), std::back_inserter(endpoints_)); + std::copy(out, boost::asio::ip::tcp::resolver::iterator(), 
std::back_inserter(endpoints_)); } shared_result->set_value( ToStatus(ec) ); }; diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/namenode_info.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/namenode_info.h index f43690dcf63b5..0532376e8c75f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/namenode_info.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/namenode_info.h @@ -19,7 +19,7 @@ #ifndef COMMON_HDFS_NAMENODE_INFO_H_ #define COMMON_HDFS_NAMENODE_INFO_H_ -#include +#include #include @@ -37,7 +37,7 @@ struct ResolvedNamenodeInfo : public NamenodeInfo { ResolvedNamenodeInfo& operator=(const NamenodeInfo &info); std::string str() const; - std::vector<::asio::ip::tcp::endpoint> endpoints; + std::vector endpoints; }; // Clear endpoints if set and resolve all of them in parallel. diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/retry_policy.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/retry_policy.cc index dca49fb66212a..eb64829601b57 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/retry_policy.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/retry_policy.cc @@ -19,6 +19,8 @@ #include "common/retry_policy.h" #include "common/logging.h" +#include + #include namespace hdfs { @@ -57,7 +59,7 @@ RetryAction FixedDelayWithFailover::ShouldRetry(const Status &s, uint64_t retrie (void)max_failover_conn_retries_; LOG_TRACE(kRPC, << "FixedDelayWithFailover::ShouldRetry(retries=" << retries << ", failovers=" << failovers << ")"); - if(failovers < max_failover_retries_ && (s.code() == ::asio::error::timed_out || s.get_server_exception_type() == Status::kStandbyException) ) + if(failovers < max_failover_retries_ && (s.code() == boost::asio::error::timed_out || s.get_server_exception_type() == Status::kStandbyException) ) { // Try connecting to another NN in case this one keeps timing out // Can add the backoff wait specified by dfs.client.failover.sleep.base.millis here diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/util.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/util.cc index 6a0798737b7d4..7a4b4cf33efed 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/util.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/util.cc @@ -30,7 +30,7 @@ namespace hdfs { -Status ToStatus(const ::asio::error_code &ec) { +Status ToStatus(const boost::system::error_code &ec) { if (ec) { return Status(ec.value(), ec.message().c_str()); } else { @@ -134,7 +134,7 @@ std::string Base64Encode(const std::string &src) { } -std::string SafeDisconnect(asio::ip::tcp::socket *sock) { +std::string SafeDisconnect(boost::asio::ip::tcp::socket *sock) { std::string err; if(sock && sock->is_open()) { /** @@ -147,7 +147,7 @@ std::string SafeDisconnect(asio::ip::tcp::socket *sock) { **/ try { - sock->shutdown(asio::ip::tcp::socket::shutdown_both); + sock->shutdown(boost::asio::ip::tcp::socket::shutdown_both); } catch (const std::exception &e) { err = std::string("shutdown() threw") + e.what(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/util.h 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/util.h index 590ba5453b6ec..a7f4f958e79d5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/util.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/util.h @@ -24,7 +24,8 @@ #include #include -#include +#include +#include #include #include @@ -41,7 +42,7 @@ namespace hdfs { typedef std::lock_guard mutex_guard; -Status ToStatus(const ::asio::error_code &ec); +Status ToStatus(const boost::system::error_code &ec); // Determine size of buffer that needs to be allocated in order to serialize msg // in delimited format @@ -75,7 +76,7 @@ bool lock_held(T & mutex) { // Shutdown and close a socket safely; will check if the socket is open and // catch anything thrown by asio. // Returns a string containing error message on failure, otherwise an empty string. -std::string SafeDisconnect(asio::ip::tcp::socket *sock); +std::string SafeDisconnect(boost::asio::ip::tcp::socket *sock); // The following helper function is used for classes that look like the following: @@ -94,13 +95,13 @@ std::string SafeDisconnect(asio::ip::tcp::socket *sock); // it's a asio socket, and nullptr if it's anything else. template -inline asio::ip::tcp::socket *get_asio_socket_ptr(sock_t *s) { +inline boost::asio::ip::tcp::socket *get_asio_socket_ptr(sock_t *s) { (void)s; return nullptr; } template<> -inline asio::ip::tcp::socket *get_asio_socket_ptr - (asio::ip::tcp::socket *s) { +inline boost::asio::ip::tcp::socket *get_asio_socket_ptr + (boost::asio::ip::tcp::socket *s) { return s; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/connection/datanodeconnection.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/connection/datanodeconnection.cc index 41424827821ea..61df6d76d99de 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/connection/datanodeconnection.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/connection/datanodeconnection.cc @@ -19,6 +19,8 @@ #include "datanodeconnection.h" #include "common/util.h" +#include + namespace hdfs { DataNodeConnection::~DataNodeConnection(){} @@ -29,7 +31,7 @@ DataNodeConnectionImpl::DataNodeConnectionImpl(std::shared_ptr io_ser const hadoop::common::TokenProto *token, LibhdfsEvents *event_handlers) : event_handlers_(event_handlers) { - using namespace ::asio::ip; + using namespace boost::asio::ip; conn_.reset(new tcp::socket(io_service->GetRaw())); auto datanode_addr = dn_proto.id(); @@ -49,8 +51,8 @@ void DataNodeConnectionImpl::Connect( // Keep the DN from being freed until we're done mutex_guard state_lock(state_lock_); auto shared_this = shared_from_this(); - asio::async_connect(*conn_, endpoints_.begin(), endpoints_.end(), - [shared_this, handler](const asio::error_code &ec, std::array::iterator it) { + boost::asio::async_connect(*conn_, endpoints_.begin(), endpoints_.end(), + [shared_this, handler](const boost::system::error_code &ec, std::array::iterator it) { (void)it; handler(ToStatus(ec), shared_this); }); } @@ -69,7 +71,7 @@ void DataNodeConnectionImpl::Cancel() { } void DataNodeConnectionImpl::async_read_some(const MutableBuffer &buf, - std::function handler) + std::function handler) { event_handlers_->call("DN_read_req", "", "", buf.end() - buf.begin()); @@ -78,7 +80,7 @@ void DataNodeConnectionImpl::async_read_some(const MutableBuffer &buf, } void 
DataNodeConnectionImpl::async_write_some(const ConstBuffer &buf, - std::function handler) + std::function handler) { event_handlers_->call("DN_write_req", "", "", buf.end() - buf.begin()); diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/connection/datanodeconnection.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/connection/datanodeconnection.h index a54338f17b6b5..a0cb8375a8680 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/connection/datanodeconnection.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/connection/datanodeconnection.h @@ -26,7 +26,8 @@ #include "common/util.h" #include "common/new_delete.h" -#include "asio.hpp" +#include +#include namespace hdfs { @@ -43,7 +44,7 @@ class DataNodeConnection : public AsyncStream { struct SocketDeleter { - inline void operator()(asio::ip::tcp::socket *sock) { +inline void operator()(boost::asio::ip::tcp::socket* sock) { // Cancel may have already closed the socket. std::string err = SafeDisconnect(sock); if(!err.empty()) { @@ -59,8 +60,8 @@ class DataNodeConnectionImpl : public DataNodeConnection, public std::enable_sha std::mutex state_lock_; public: MEMCHECKED_CLASS(DataNodeConnectionImpl) - std::unique_ptr conn_; - std::array endpoints_; + std::unique_ptr conn_; + std::array endpoints_; std::string uuid_; LibhdfsEvents *event_handlers_; @@ -74,10 +75,10 @@ class DataNodeConnectionImpl : public DataNodeConnection, public std::enable_sha void Cancel() override; void async_read_some(const MutableBuffer &buf, - std::function handler) override; + std::function handler) override; void async_write_some(const ConstBuffer &buf, - std::function handler) override; + std::function handler) override; }; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filehandle.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filehandle.cc index 02630fb247a6f..169def364b732 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filehandle.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filehandle.cc @@ -26,6 +26,8 @@ #include #include +#include + #define FMT_THIS_ADDR "this=" << (void*)this namespace hdfs { @@ -72,7 +74,7 @@ void FileHandleImpl::PositionRead( handler(status, bytes_read); }; - AsyncPreadSome(offset, asio::buffer(buf, buf_size), bad_node_tracker_, callback); + AsyncPreadSome(offset, boost::asio::buffer(buf, buf_size), bad_node_tracker_, callback); } Status FileHandleImpl::PositionRead(void *buf, size_t buf_size, off_t offset, size_t *bytes_read) { @@ -233,7 +235,7 @@ void FileHandleImpl::AsyncPreadSome( uint64_t offset_within_block = offset - block->offset(); uint64_t size_within_block = std::min( - block->b().numbytes() - offset_within_block, asio::buffer_size(buffer)); + block->b().numbytes() - offset_within_block, boost::asio::buffer_size(buffer)); LOG_DEBUG(kFileHandle, << "FileHandleImpl::AsyncPreadSome(" << FMT_THIS_ADDR << "), ...) 
Datanode hostname=" << dnHostName << ", IP Address=" << dnIpAddr @@ -281,7 +283,7 @@ void FileHandleImpl::AsyncPreadSome( if (status.ok()) { reader->AsyncReadBlock( client_name, *block, offset_within_block, - asio::buffer(buffer, size_within_block), read_handler); + boost::asio::buffer(buffer, size_within_block), read_handler); } else { handler(status, dn_id, 0); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.cc index 41cc645be5a20..ba75e86eec78d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.cc @@ -29,6 +29,8 @@ #include #include +#include + #define FMT_THIS_ADDR "this=" << (void*)this namespace hdfs { @@ -36,7 +38,7 @@ namespace hdfs { static const char kNamenodeProtocol[] = "org.apache.hadoop.hdfs.protocol.ClientProtocol"; static const int kNamenodeProtocolVersion = 1; -using ::asio::ip::tcp; +using boost::asio::ip::tcp; static constexpr uint16_t kDefaultPort = 8020; diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/namenode_operations.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/namenode_operations.cc index e46faad127436..96744e5d03d2f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/namenode_operations.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/namenode_operations.cc @@ -19,7 +19,7 @@ #include "filesystem.h" #include "common/continuation/asio.h" -#include +#include #include #include @@ -31,7 +31,7 @@ #define FMT_THIS_ADDR "this=" << (void*)this -using ::asio::ip::tcp; +using boost::asio::ip::tcp; namespace hdfs { diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/reader/block_reader.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/reader/block_reader.cc index 90c02f71c1d56..acecfce52374e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/reader/block_reader.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/reader/block_reader.cc @@ -24,6 +24,10 @@ #include +#include +#include +#include + namespace hdfs { #define FMT_CONT_AND_PARENT_ADDR "this=" << (void*)this << ", parent=" << (void*)parent_ @@ -113,7 +117,7 @@ void BlockReaderImpl::AsyncRequestBlock(const std::string &client_name, auto read_pb_message = new continuation::ReadDelimitedPBMessageContinuation(dn_, &s->response); - m->Push(asio_continuation::Write(dn_, asio::buffer(s->header))).Push(read_pb_message); + m->Push(asio_continuation::Write(dn_, boost::asio::buffer(s->header))).Push(read_pb_message); m->Run([this, handler, offset](const Status &status, const State &s) { Status stat = status; if (stat.ok()) { @@ -167,7 +171,7 @@ struct BlockReaderImpl::ReadPacketHeader : continuation::Continuation parent_->packet_data_read_bytes_ = 0; parent_->packet_len_ = 0; - auto handler = [next, this](const asio::error_code &ec, size_t) { + auto handler = [next, this](const boost::system::error_code &ec, size_t) { Status status; if (ec) { status = Status(ec.value(), ec.message().c_str()); @@ -191,7 +195,7 @@ struct BlockReaderImpl::ReadPacketHeader : continuation::Continuation next(status); }; - asio::async_read(*parent_->dn_, 
asio::buffer(buf_), + boost::asio::async_read(*parent_->dn_, boost::asio::buffer(buf_), std::bind(&ReadPacketHeader::CompletionHandler, this, std::placeholders::_1, std::placeholders::_2), handler); } @@ -215,7 +219,7 @@ struct BlockReaderImpl::ReadPacketHeader : continuation::Continuation return ntohs(*reinterpret_cast(&buf_[kHeaderLenOffset])); } - size_t CompletionHandler(const asio::error_code &ec, size_t transferred) { + size_t CompletionHandler(const boost::system::error_code &ec, size_t transferred) { if (ec) { return 0; } else if (transferred < kHeaderStart) { @@ -245,7 +249,7 @@ struct BlockReaderImpl::ReadChecksum : continuation::Continuation std::shared_ptr keep_conn_alive_ = shared_conn_; - auto handler = [parent, next, this, keep_conn_alive_](const asio::error_code &ec, size_t) + auto handler = [parent, next, this, keep_conn_alive_](const boost::system::error_code &ec, size_t) { Status status; if (ec) { @@ -266,7 +270,7 @@ struct BlockReaderImpl::ReadChecksum : continuation::Continuation parent->checksum_.resize(parent->packet_len_ - sizeof(int) - parent->header_.datalen()); - asio::async_read(*parent->dn_, asio::buffer(parent->checksum_), handler); + boost::asio::async_read(*parent->dn_, boost::asio::buffer(parent->checksum_), handler); } private: @@ -279,8 +283,8 @@ struct BlockReaderImpl::ReadChecksum : continuation::Continuation struct BlockReaderImpl::ReadData : continuation::Continuation { ReadData(BlockReaderImpl *parent, std::shared_ptr bytes_transferred, - const asio::mutable_buffers_1 &buf) : parent_(parent), - bytes_transferred_(bytes_transferred), buf_(buf), shared_conn_(parent->dn_) + const boost::asio::mutable_buffers_1 &buf) : parent_(parent), + bytes_transferred_(bytes_transferred), buf_(buf), shared_conn_(parent->dn_) { buf_.begin(); } @@ -293,7 +297,7 @@ struct BlockReaderImpl::ReadData : continuation::Continuation LOG_TRACE(kBlockReader, << "BlockReaderImpl::ReadData::Run(" << FMT_CONT_AND_PARENT_ADDR << ") called"); auto handler = - [next, this](const asio::error_code &ec, size_t transferred) { + [next, this](const boost::system::error_code &ec, size_t transferred) { Status status; if (ec) { status = Status(ec.value(), ec.message().c_str()); @@ -320,13 +324,13 @@ struct BlockReaderImpl::ReadData : continuation::Continuation auto data_len = parent_->header_.datalen() - parent_->packet_data_read_bytes_; - asio::async_read(*parent_->dn_, buf_, asio::transfer_exactly(data_len), handler); + boost::asio::async_read(*parent_->dn_, buf_, boost::asio::transfer_exactly(data_len), handler); } private: BlockReaderImpl *parent_; std::shared_ptr bytes_transferred_; - const asio::mutable_buffers_1 buf_; + const boost::asio::mutable_buffers_1 buf_; // Keep DNConnection alive. 
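// A minimal sketch (not from the patch) of the read-call shape the
// ReadPacketHeader/ReadChecksum/ReadData hunks above migrate to:
// boost::asio::async_read with a transfer_exactly completion condition and a
// boost::system::error_code in the handler, which the real code maps to
// hdfs::Status via ToStatus. The socket, buffer, and function names are
// assumptions for the example.
#include <boost/asio.hpp>
#include <array>
#include <cstddef>

void ReadExactly(boost::asio::ip::tcp::socket &socket,
                 std::array<char, 65536> &buf, std::size_t data_len) {
  boost::asio::async_read(
      socket, boost::asio::buffer(buf),
      boost::asio::transfer_exactly(data_len),
      [](const boost::system::error_code &ec, std::size_t transferred) {
        if (ec) {
          // the library would build Status(ec.value(), ec.message().c_str())
          return;
        }
        (void)transferred;  // number of bytes delivered into buf
      });
}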
std::shared_ptr shared_conn_; @@ -337,7 +341,7 @@ struct BlockReaderImpl::ReadPadding : continuation::Continuation ReadPadding(BlockReaderImpl *parent) : parent_(parent), padding_(parent->chunk_padding_bytes_), bytes_transferred_(std::make_shared(0)), - read_data_(new ReadData(parent, bytes_transferred_, asio::buffer(padding_))), + read_data_(new ReadData(parent, bytes_transferred_, boost::asio::buffer(padding_))), shared_conn_(parent->dn_) {} virtual void Run(const Next &next) override { @@ -505,7 +509,7 @@ struct BlockReaderImpl::RequestBlockContinuation : continuation::Continuation struct BlockReaderImpl::ReadBlockContinuation : continuation::Continuation { ReadBlockContinuation(BlockReader *reader, MutableBuffer buffer, size_t *transferred) - : reader_(reader), buffer_(buffer), buffer_size_(asio::buffer_size(buffer)), transferred_(transferred) {} + : reader_(reader), buffer_(buffer), buffer_size_(boost::asio::buffer_size(buffer)), transferred_(transferred) {} virtual void Run(const Next &next) override { LOG_TRACE(kBlockReader, << "BlockReaderImpl::ReadBlockContinuation::Run(" @@ -532,7 +536,7 @@ struct BlockReaderImpl::ReadBlockContinuation : continuation::Continuation next_(status); } else { reader_->AsyncReadPacket( - asio::buffer(buffer_ + *transferred_, buffer_size_ - *transferred_), + boost::asio::buffer(buffer_ + *transferred_, buffer_size_ - *transferred_), std::bind(&ReadBlockContinuation::OnReadData, this, _1, _2)); } } @@ -551,7 +555,7 @@ void BlockReaderImpl::AsyncReadBlock( auto m = continuation::Pipeline::Create(cancel_state_); size_t * bytesTransferred = &m->state(); - size_t size = asio::buffer_size(buffer); + size_t size = boost::asio::buffer_size(buffer); m->Push(new RequestBlockContinuation(this, client_name, &block.b(), size, offset)) .Push(new ReadBlockContinuation(this, buffer, bytesTransferred)); diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/reader/datatransfer.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/reader/datatransfer.h index ea176532f2368..cfa94bea2baf2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/reader/datatransfer.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/reader/datatransfer.h @@ -21,8 +21,10 @@ #include "common/sasl_authenticator.h" #include "common/async_stream.h" #include "connection/datanodeconnection.h" + #include +#include namespace hdfs { @@ -45,13 +47,13 @@ template class DataTransferSaslStream : public DataNodeConnection template void Handshake(const Handler &next); void async_read_some(const MutableBuffer &buf, - std::function handler) override { stream_->async_read_some(buf, handler); } void async_write_some(const ConstBuffer &buf, - std::function handler) override { stream_->async_write_some(buf, handler); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/reader/datatransfer_impl.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/reader/datatransfer_impl.h index 77e618dd7a214..d77685dd45a70 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/reader/datatransfer_impl.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/reader/datatransfer_impl.h @@ -23,8 +23,8 @@ #include "common/continuation/asio.h" #include "common/continuation/protobuf.h" -#include -#include +#include +#include namespace hdfs { @@ -101,7 +101,7 @@ void 
DataTransferSaslStream::Handshake(const Handler &next) { using ::hdfs::continuation::WriteDelimitedPBMessage; static const int kMagicNumber = htonl(kDataTransferSasl); - static const asio::const_buffers_1 kMagicNumberBuffer = asio::buffer( + static const boost::asio::const_buffers_1 kMagicNumberBuffer = boost::asio::buffer( reinterpret_cast(kMagicNumber), sizeof(kMagicNumber)); struct State { diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/namenode_tracker.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/namenode_tracker.cc index 242c6eadcbb3c..acb754e738abc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/namenode_tracker.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/namenode_tracker.cc @@ -24,7 +24,7 @@ namespace hdfs { -static std::string format_endpoints(const std::vector<::asio::ip::tcp::endpoint> &pts) { +static std::string format_endpoints(const std::vector &pts) { std::stringstream ss; for(unsigned int i=0; i &se HANamenodeTracker::~HANamenodeTracker() {} -bool HANamenodeTracker::GetFailoverAndUpdate(const std::vector<::asio::ip::tcp::endpoint>& current_endpoints, +bool HANamenodeTracker::GetFailoverAndUpdate(const std::vector& current_endpoints, ResolvedNamenodeInfo& out) { mutex_guard swap_lock(swap_lock_); @@ -117,7 +117,7 @@ bool HANamenodeTracker::GetFailoverAndUpdate(const std::vector<::asio::ip::tcp:: } -bool HANamenodeTracker::IsCurrentActive_locked(const ::asio::ip::tcp::endpoint &ep) const { +bool HANamenodeTracker::IsCurrentActive_locked(const boost::asio::ip::tcp::endpoint &ep) const { for(unsigned int i=0;i +#include #include #include @@ -52,13 +52,13 @@ class HANamenodeTracker { // currently being used. Swap internal state and set out to other node. // Note: This will always mutate internal state. 
Use IsCurrentActive/Standby to // get info without changing state - bool GetFailoverAndUpdate(const std::vector<::asio::ip::tcp::endpoint>& current_endpoints, + bool GetFailoverAndUpdate(const std::vector& current_endpoints, ResolvedNamenodeInfo& out); private: // See if endpoint ep is part of the list of endpoints for the active or standby NN - bool IsCurrentActive_locked(const ::asio::ip::tcp::endpoint &ep) const; - bool IsCurrentStandby_locked(const ::asio::ip::tcp::endpoint &ep) const; + bool IsCurrentActive_locked(const boost::asio::ip::tcp::endpoint &ep) const; + bool IsCurrentStandby_locked(const boost::asio::ip::tcp::endpoint &ep) const; // If HA should be enabled, according to our options and runtime info like # nodes provided bool enabled_; diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/request.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/request.h index f19554046dcf8..0e19fff87b4d6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/request.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/request.h @@ -29,7 +29,7 @@ #include #include -#include +#include namespace hdfs { @@ -59,7 +59,7 @@ class Request { int call_id() const { return call_id_; } std::string method_name() const { return method_name_; } - ::asio::deadline_timer &timer() { return timer_; } + boost::asio::deadline_timer &timer() { return timer_; } int IncrementRetryCount() { return retry_count_++; } int IncrementFailoverCount(); void GetPacket(std::string *res) const; @@ -75,7 +75,7 @@ class Request { const std::string method_name_; const int call_id_; - ::asio::deadline_timer timer_; + boost::asio::deadline_timer timer_; std::string payload_; const Handler handler_; diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_connection.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_connection.h index 9f7b3bbd2424a..f599d36ee5c4d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_connection.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_connection.h @@ -42,6 +42,8 @@ #include #include +#include + namespace hdfs { typedef const std::function RpcCallback; @@ -57,10 +59,10 @@ class RpcConnection : public std::enable_shared_from_this { // Note that a single server can have multiple endpoints - especially both // an ipv4 and ipv6 endpoint - virtual void Connect(const std::vector<::asio::ip::tcp::endpoint> &server, + virtual void Connect(const std::vector &server, const AuthInfo & auth_info, RpcCallback &handler) = 0; - virtual void ConnectAndFlush(const std::vector<::asio::ip::tcp::endpoint> &server) = 0; + virtual void ConnectAndFlush(const std::vector &server) = 0; virtual void Disconnect() = 0; void StartReading(); @@ -110,9 +112,9 @@ class RpcConnection : public std::enable_shared_from_this { virtual void SendContext(RpcCallback &handler) = 0; void ContextComplete(const Status &s); - virtual void OnSendCompleted(const ::asio::error_code &ec, + virtual void OnSendCompleted(const boost::system::error_code &ec, size_t transferred) = 0; - virtual void OnRecvCompleted(const ::asio::error_code &ec, + virtual void OnRecvCompleted(const boost::system::error_code &ec, size_t transferred) = 0; virtual void FlushPendingRequests()=0; // Synchronously write the next request @@ -133,10 +135,10 @@ class 
RpcConnection : public std::enable_shared_from_this { Status HandleRpcResponse(std::shared_ptr response); void HandleRpcTimeout(std::shared_ptr req, - const ::asio::error_code &ec); + const boost::system::error_code &ec); void CommsError(const Status &status); - void ClearAndDisconnect(const ::asio::error_code &ec); + void ClearAndDisconnect(const boost::system::error_code &ec); std::shared_ptr RemoveFromRunningQueue(int call_id); std::weak_ptr engine_; diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_connection_impl.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_connection_impl.cc index 43111eff9499a..82fdfeb033d38 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_connection_impl.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_connection_impl.cc @@ -23,6 +23,8 @@ #include "ProtobufRpcEngine.pb.h" #include "IpcConnectionContext.pb.h" +#include + namespace hdfs { namespace pb = ::google::protobuf; @@ -89,7 +91,7 @@ void RpcConnection::StartReading() { } service->PostLambda( - [shared_this, this] () { OnRecvCompleted(::asio::error_code(), 0); } + [shared_this, this] () { OnRecvCompleted(boost::system::error_code(), 0); } ); } @@ -248,8 +250,8 @@ Status RpcConnection::HandleRpcResponse(std::shared_ptr response) { } void RpcConnection::HandleRpcTimeout(std::shared_ptr req, - const ::asio::error_code &ec) { - if (ec.value() == asio::error::operation_aborted) { + const boost::system::error_code &ec) { + if (ec.value() == boost::asio::error::operation_aborted) { return; } @@ -260,7 +262,7 @@ void RpcConnection::HandleRpcTimeout(std::shared_ptr req, return; } - Status stat = ToStatus(ec ? ec : make_error_code(::asio::error::timed_out)); + Status stat = ToStatus(ec ? 
ec : make_error_code(boost::asio::error::timed_out)); r->OnResponseArrived(nullptr, stat); } @@ -469,7 +471,7 @@ void RpcConnection::CommsError(const Status &status) { pinnedEngine->AsyncRpcCommsError(status, shared_from_this(), requestsToReturn); } -void RpcConnection::ClearAndDisconnect(const ::asio::error_code &ec) { +void RpcConnection::ClearAndDisconnect(const boost::system::error_code &ec) { Disconnect(); std::vector> requests; std::transform(sent_requests_.begin(), sent_requests_.end(), diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_connection_impl.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_connection_impl.h index 1dd43af36b0e9..884bd64ac642c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_connection_impl.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_connection_impl.h @@ -28,9 +28,11 @@ #include "common/libhdfs_events_impl.h" #include "hdfspp/ioservice.h" -#include -#include -#include +#include +#include +#include +#include +#include #include @@ -44,17 +46,17 @@ class RpcConnectionImpl : public RpcConnection { RpcConnectionImpl(std::shared_ptr engine); virtual ~RpcConnectionImpl() override; - virtual void Connect(const std::vector<::asio::ip::tcp::endpoint> &server, + virtual void Connect(const std::vector &server, const AuthInfo & auth_info, RpcCallback &handler) override; virtual void ConnectAndFlush( - const std::vector<::asio::ip::tcp::endpoint> &server) override; + const std::vector &server) override; virtual void SendHandshake(RpcCallback &handler) override; virtual void SendContext(RpcCallback &handler) override; virtual void Disconnect() override; - virtual void OnSendCompleted(const ::asio::error_code &ec, + virtual void OnSendCompleted(const boost::system::error_code &ec, size_t transferred) override; - virtual void OnRecvCompleted(const ::asio::error_code &ec, + virtual void OnRecvCompleted(const boost::system::error_code &ec, size_t transferred) override; virtual void FlushPendingRequests() override; @@ -65,12 +67,12 @@ class RpcConnectionImpl : public RpcConnection { private: const Options options_; - ::asio::ip::tcp::endpoint current_endpoint_; - std::vector<::asio::ip::tcp::endpoint> additional_endpoints_; + boost::asio::ip::tcp::endpoint current_endpoint_; + std::vector additional_endpoints_; Socket socket_; - ::asio::deadline_timer connect_timer_; + boost::asio::deadline_timer connect_timer_; - void ConnectComplete(const ::asio::error_code &ec, const ::asio::ip::tcp::endpoint &remote); + void ConnectComplete(const boost::system::error_code &ec, const boost::asio::ip::tcp::endpoint &remote); }; template @@ -95,7 +97,7 @@ RpcConnectionImpl::~RpcConnectionImpl() { template void RpcConnectionImpl::Connect( - const std::vector<::asio::ip::tcp::endpoint> &server, + const std::vector &server, const AuthInfo & auth_info, RpcCallback &handler) { LOG_TRACE(kRPC, << "RpcConnectionImpl::Connect called"); @@ -124,7 +126,7 @@ void RpcConnectionImpl::Connect( template void RpcConnectionImpl::ConnectAndFlush( - const std::vector<::asio::ip::tcp::endpoint> &server) { + const std::vector &server) { LOG_INFO(kRPC, << "ConnectAndFlush called"); std::lock_guard state_lock(connection_state_lock_); @@ -147,29 +149,29 @@ void RpcConnectionImpl::ConnectAndFlush( // Take the first endpoint, but remember the alternatives for later additional_endpoints_ = server; - ::asio::ip::tcp::endpoint first_endpoint = 
additional_endpoints_.front();
+  boost::asio::ip::tcp::endpoint first_endpoint = additional_endpoints_.front();
   additional_endpoints_.erase(additional_endpoints_.begin());
   current_endpoint_ = first_endpoint;
 
   auto shared_this = shared_from_this();
-  socket_.async_connect(first_endpoint, [shared_this, this, first_endpoint](const ::asio::error_code &ec) {
+  socket_.async_connect(first_endpoint, [shared_this, this, first_endpoint](const boost::system::error_code &ec) {
     ConnectComplete(ec, first_endpoint);
   });
 
   // Prompt the timer to timeout
   auto weak_this = std::weak_ptr<RpcConnection>(shared_this);
   connect_timer_.expires_from_now(
-      std::chrono::milliseconds(options_.rpc_connect_timeout));
-  connect_timer_.async_wait([shared_this, this, first_endpoint](const ::asio::error_code &ec) {
+      boost::posix_time::milliseconds(options_.rpc_connect_timeout));
+  connect_timer_.async_wait([shared_this, this, first_endpoint](const boost::system::error_code &ec) {
     if (ec)
       ConnectComplete(ec, first_endpoint);
     else
-      ConnectComplete(make_error_code(asio::error::host_unreachable), first_endpoint);
+      ConnectComplete(make_error_code(boost::asio::error::host_unreachable), first_endpoint);
   });
 }
 
 template <class Socket>
-void RpcConnectionImpl<Socket>::ConnectComplete(const ::asio::error_code &ec, const ::asio::ip::tcp::endpoint & remote) {
+void RpcConnectionImpl<Socket>::ConnectComplete(const boost::system::error_code &ec, const boost::asio::ip::tcp::endpoint & remote) {
   auto shared_this = RpcConnectionImpl<Socket>::shared_from_this();
   std::lock_guard<std::mutex> state_lock(connection_state_lock_);
   connect_timer_.cancel();
@@ -211,20 +213,20 @@ void RpcConnectionImpl<Socket>::ConnectComplete(const ::asio::error_code &ec, co
   if (!additional_endpoints_.empty()) {
     // If we have additional endpoints, keep trying until we either run out or
     // hit one
-    ::asio::ip::tcp::endpoint next_endpoint = additional_endpoints_.front();
+    boost::asio::ip::tcp::endpoint next_endpoint = additional_endpoints_.front();
     additional_endpoints_.erase(additional_endpoints_.begin());
     current_endpoint_ = next_endpoint;
 
-    socket_.async_connect(next_endpoint, [shared_this, this, next_endpoint](const ::asio::error_code &ec) {
+    socket_.async_connect(next_endpoint, [shared_this, this, next_endpoint](const boost::system::error_code &ec) {
       ConnectComplete(ec, next_endpoint);
     });
     connect_timer_.expires_from_now(
-        std::chrono::milliseconds(options_.rpc_connect_timeout));
-    connect_timer_.async_wait([shared_this, this, next_endpoint](const ::asio::error_code &ec) {
+        boost::posix_time::milliseconds(options_.rpc_connect_timeout));
+    connect_timer_.async_wait([shared_this, this, next_endpoint](const boost::system::error_code &ec) {
       if (ec)
         ConnectComplete(ec, next_endpoint);
       else
-        ConnectComplete(make_error_code(asio::error::host_unreachable), next_endpoint);
+        ConnectComplete(make_error_code(boost::asio::error::host_unreachable), next_endpoint);
     });
   } else {
     CommsError(status);
@@ -241,9 +243,9 @@ void RpcConnectionImpl<Socket>::SendHandshake(RpcCallback &handler) {
   auto shared_this = shared_from_this();
   auto handshake_packet = PrepareHandshakePacket();
 
-  ::asio::async_write(socket_, asio::buffer(*handshake_packet),
+  boost::asio::async_write(socket_, boost::asio::buffer(*handshake_packet),
                       [handshake_packet, handler, shared_this, this](
-                          const ::asio::error_code &ec, size_t) {
+                          const boost::system::error_code &ec, size_t) {
                         Status status = ToStatus(ec);
                         handler(status);
                       });
@@ -257,16 +259,16 @@ void RpcConnectionImpl<Socket>::SendContext(RpcCallback &handler) {
   auto shared_this = shared_from_this();
   auto context_packet =
PrepareContextPacket();
 
-  ::asio::async_write(socket_, asio::buffer(*context_packet),
+  boost::asio::async_write(socket_, boost::asio::buffer(*context_packet),
                       [context_packet, handler, shared_this, this](
-                          const ::asio::error_code &ec, size_t) {
+                          const boost::system::error_code &ec, size_t) {
                         Status status = ToStatus(ec);
                         handler(status);
                       });
 }
 
 template <class Socket>
-void RpcConnectionImpl<Socket>::OnSendCompleted(const ::asio::error_code &ec,
+void RpcConnectionImpl<Socket>::OnSendCompleted(const boost::system::error_code &ec,
                                                 size_t) {
   using std::placeholders::_1;
   using std::placeholders::_2;
@@ -340,16 +342,16 @@ void RpcConnectionImpl<Socket>::FlushPendingRequests() {
     outgoing_request_ = req;
 
     req->timer().expires_from_now(
-        std::chrono::milliseconds(options_.rpc_timeout));
-    req->timer().async_wait([weak_this, weak_req, this](const ::asio::error_code &ec) {
+        boost::posix_time::milliseconds(options_.rpc_timeout));
+    req->timer().async_wait([weak_this, weak_req, this](const boost::system::error_code &ec) {
       auto timeout_this = weak_this.lock();
       auto timeout_req = weak_req.lock();
       if (timeout_this && timeout_req)
        this->HandleRpcTimeout(timeout_req, ec);
    });
 
-    asio::async_write(socket_, asio::buffer(*payload),
-                      [shared_this, this, payload](const ::asio::error_code &ec,
+    boost::asio::async_write(socket_, boost::asio::buffer(*payload),
+                      [shared_this, this, payload](const boost::system::error_code &ec,
                                                    size_t size) {
                         OnSendCompleted(ec, size);
                       });
@@ -374,13 +376,13 @@
 
 template <class Socket>
-void RpcConnectionImpl<Socket>::OnRecvCompleted(const ::asio::error_code &original_ec,
+void RpcConnectionImpl<Socket>::OnRecvCompleted(const boost::system::error_code &original_ec,
                                                 size_t) {
   using std::placeholders::_1;
   using std::placeholders::_2;
   std::lock_guard<std::mutex> state_lock(connection_state_lock_);
 
-  ::asio::error_code my_ec(original_ec);
+  boost::system::error_code my_ec(original_ec);
 
   LOG_TRACE(kRPC, << "RpcConnectionImpl::OnRecvCompleted called");
 
@@ -390,7 +392,7 @@ void RpcConnectionImpl<Socket>::OnRecvCompleted(const ::asio::error_code &origin
     event_response event_resp = event_handlers_->call(FS_NN_READ_EVENT, cluster_name_.c_str(), 0);
 #ifndef LIBHDFSPP_SIMULATE_ERROR_DISABLED
     if (event_resp.response_type() == event_response::kTest_Error) {
-      my_ec = std::make_error_code(std::errc::network_down);
+      my_ec = boost::system::error_code(boost::system::errc::errc_t::network_down, boost::system::system_category());
     }
 #endif
   }
 
@@ -399,7 +401,7 @@
     case 0:
       // No errors
       break;
-    case asio::error::operation_aborted:
+    case boost::asio::error::operation_aborted:
       // The event loop has been shut down. Ignore the error.
      return;
    default:
@@ -414,20 +416,20 @@ void RpcConnectionImpl<Socket>::OnRecvCompleted(const ::asio::error_code &origin
   if (current_response_state_->state_ == Response::kReadLength) {
     current_response_state_->state_ = Response::kReadContent;
 
-    auto buf = ::asio::buffer(reinterpret_cast<char *>(&current_response_state_->length_),
+    auto buf = boost::asio::buffer(reinterpret_cast<char *>(&current_response_state_->length_),
                               sizeof(current_response_state_->length_));
-    asio::async_read(
+    boost::asio::async_read(
         socket_, buf,
-        [shared_this, this](const ::asio::error_code &ec, size_t size) {
+        [shared_this, this](const boost::system::error_code &ec, size_t size) {
           OnRecvCompleted(ec, size);
         });
   } else if (current_response_state_->state_ == Response::kReadContent) {
     current_response_state_->state_ = Response::kParseResponse;
     current_response_state_->length_ = ntohl(current_response_state_->length_);
     current_response_state_->data_.resize(current_response_state_->length_);
-    asio::async_read(
-        socket_, ::asio::buffer(current_response_state_->data_),
-        [shared_this, this](const ::asio::error_code &ec, size_t size) {
+    boost::asio::async_read(
+        socket_, boost::asio::buffer(current_response_state_->data_),
+        [shared_this, this](const boost::system::error_code &ec, size_t size) {
          OnRecvCompleted(ec, size);
        });
   } else if (current_response_state_->state_ == Response::kParseResponse) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_engine.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_engine.cc
index ad6c9b91364f5..06cda962cf9b0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_engine.cc
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_engine.cc
@@ -24,6 +24,8 @@
 #include
 
+#include
+
 namespace hdfs {
 
 template
@@ -171,7 +173,7 @@ std::shared_ptr<RpcConnection> RpcEngine::NewConnection() {
   LOG_DEBUG(kRPC, << "RpcEngine::NewConnection called");
 
-  return std::make_shared<RpcConnectionImpl<::asio::ip::tcp::socket>>(shared_from_this());
+  return std::make_shared<RpcConnectionImpl<boost::asio::ip::tcp::socket>>(shared_from_this());
 }
 
 std::shared_ptr<RpcConnection> RpcEngine::InitializeConnection()
@@ -307,8 +309,8 @@ void RpcEngine::RpcCommsError(
     if (head_action->delayMillis > 0) {
       auto weak_conn = std::weak_ptr<RpcConnection>(conn_);
       retry_timer.expires_from_now(
-          std::chrono::milliseconds(head_action->delayMillis));
-      retry_timer.async_wait([this, weak_conn](asio::error_code ec) {
+          boost::posix_time::milliseconds(head_action->delayMillis));
+      retry_timer.async_wait([this, weak_conn](boost::system::error_code ec) {
         auto strong_conn = weak_conn.lock();
         if ( (!ec) && (strong_conn) ) {
           strong_conn->ConnectAndFlush(last_endpoints_);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_engine.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_engine.h
index 845eaf5868789..13e56c5b92fb8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_engine.h
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_engine.h
@@ -31,8 +31,8 @@
 #include
 
-#include
-#include
+#include
+#include
 
 #include
 #include
 
@@ -160,7 +160,7 @@ class RpcEngine : public LockFreeRpcEngine, public std::enable_shared_from_this<
   static std::string getRandomClientId();
 
   // Remember all of the last endpoints in case we need to reconnect and retry
-  std::vector<::asio::ip::tcp::endpoint> last_endpoints_;
+  std::vector<boost::asio::ip::tcp::endpoint> last_endpoints_;
 
 private:
   mutable std::shared_ptr<IoService> io_service_;
@@ -173,7 +173,7 @@ class RpcEngine : public LockFreeRpcEngine, public std::enable_shared_from_this<
   AuthInfo auth_info_;
   std::string cluster_name_;
   std::atomic_int call_id_;
-  ::asio::deadline_timer retry_timer;
+  boost::asio::deadline_timer retry_timer;
 
   std::shared_ptr<LibhdfsEvents> event_handlers_;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/CMakeLists.txt
index 59fdbf20a27d3..2b2f4f16f1677 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/CMakeLists.txt
@@ -16,6 +16,8 @@
 # limitations under the License.
 #
 
+find_package(Boost REQUIRED COMPONENTS date_time)
+
 # Delegate some functionality to libhdfs, until libhdfspp is complete.
 set (LIBHDFS_SRC_DIR ../../libhdfs)
 set (LIBHDFS_TESTS_DIR ../../libhdfs-tests)
@@ -81,7 +83,7 @@ add_memcheck_test(retry_policy retry_policy_test)
 
 include_directories(${CMAKE_CURRENT_BINARY_DIR})
 add_executable(rpc_engine_test rpc_engine_test.cc ${PROTO_TEST_SRCS} ${PROTO_TEST_HDRS})
-target_link_libraries(rpc_engine_test test_common rpc proto common ${PROTOBUF_LIBRARIES} ${OPENSSL_LIBRARIES} ${SASL_LIBRARIES} gmock_main ${CMAKE_THREAD_LIBS_INIT})
+target_link_libraries(rpc_engine_test test_common rpc proto common ${PROTOBUF_LIBRARIES} ${OPENSSL_LIBRARIES} ${SASL_LIBRARIES} gmock_main ${CMAKE_THREAD_LIBS_INIT} ${Boost_LIBRARIES})
 add_memcheck_test(rpc_engine rpc_engine_test)
 
 add_executable(bad_datanode_test bad_datanode_test.cc)
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/bad_datanode_test.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/bad_datanode_test.cc
index 23de0154f8ab0..5417af8f4cf11 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/bad_datanode_test.cc
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/bad_datanode_test.cc
@@ -25,6 +25,9 @@
 #include
 
+#include
+#include
+
 using hadoop::common::TokenProto;
 using hadoop::hdfs::DatanodeInfoProto;
 using hadoop::hdfs::DatanodeIDProto;
@@ -42,7 +45,7 @@ class MockReader : public BlockReader {
 public:
   MOCK_METHOD2(
       AsyncReadPacket,
-      void(const asio::mutable_buffers_1 &,
+      void(const boost::asio::mutable_buffers_1 &,
           const std::function &));
 
   MOCK_METHOD5(AsyncRequestBlock,
@@ -69,17 +72,17 @@ class MockDNConnection : public DataNodeConnection, public std::enable_shared_fr
   }
 
   void async_read_some(const MutableBuffer &buf,
-        std::function handler) override {
     (void)buf;
-    handler(asio::error::fault, 0);
+    handler(boost::asio::error::fault, 0);
   }
 
   void async_write_some(const ConstBuffer &buf,
-        std::function handler) override {
     (void)buf;
-    handler(asio::error::fault, 0);
+    handler(boost::asio::error::fault, 0);
   }
 
   virtual void Cancel() override {
@@ -141,7 +144,7 @@ TEST(BadDataNodeTest, TestNoNodes) {
   size_t read = 0;
 
   // Exclude the one datanode with the data
-  is.AsyncPreadSome(0, asio::buffer(buf, sizeof(buf)), nullptr,
+  is.AsyncPreadSome(0, boost::asio::buffer(buf, sizeof(buf)), nullptr,
     [&stat, &read](const Status &status, const std::string &, size_t transferred) {
       stat = status;
       read = transferred;
@@ -202,7 +205,7 @@ TEST(BadDataNodeTest, NNEventCallback) {
         Status::OK(),
         0));
   is.AsyncPreadSome(
-      0, asio::buffer(buf, sizeof(buf)), nullptr,
+      0, boost::asio::buffer(buf, sizeof(buf)), nullptr,
[&stat, &read](const Status &status, const std::string &, size_t transferred) { stat = status; @@ -248,7 +251,7 @@ TEST(BadDataNodeTest, RecoverableError) { is.AsyncPreadSome( - 0, asio::buffer(buf, sizeof(buf)), nullptr, + 0, boost::asio::buffer(buf, sizeof(buf)), nullptr, [&stat, &read](const Status &status, const std::string &, size_t transferred) { stat = status; @@ -300,7 +303,7 @@ TEST(BadDataNodeTest, InternalError) { sizeof(buf))); is.AsyncPreadSome( - 0, asio::buffer(buf, sizeof(buf)), nullptr, + 0, boost::asio::buffer(buf, sizeof(buf)), nullptr, [&stat, &read](const Status &status, const std::string &, size_t transferred) { stat = status; diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/mock_connection.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/mock_connection.cc index 1885eea8da184..37fabf568d275 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/mock_connection.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/mock_connection.cc @@ -20,7 +20,7 @@ namespace hdfs { -MockConnectionBase::MockConnectionBase(::asio::io_service *io_service) +MockConnectionBase::MockConnectionBase(boost::asio::io_service *io_service) : io_service_(io_service) {} @@ -31,7 +31,7 @@ ProducerResult SharedMockConnection::Produce() { return shared_prducer->Produce(); } else { assert(false && "No producer registered"); - return std::make_pair(asio::error_code(), ""); + return std::make_pair(boost::system::error_code(), ""); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/mock_connection.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/mock_connection.h index 82db760421314..7a7b5f076ed39 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/mock_connection.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/mock_connection.h @@ -20,21 +20,21 @@ #include "common/async_stream.h" -#include -#include -#include -#include +#include +#include +#include +#include #include namespace hdfs { -typedef std::pair ProducerResult; +typedef std::pair ProducerResult; class AsioProducer { public: /* * Return either: - * (::asio::error_code(), ) for a good result + * (::boost::system::error_code(), ) for a good result * (, ) to pass an error to the caller * (::asio::error::would_block, ) to block the next call forever */ @@ -45,53 +45,53 @@ class AsioProducer { class MockConnectionBase : public AsioProducer, public AsyncStream { public: - MockConnectionBase(::asio::io_service *io_service); + MockConnectionBase(boost::asio::io_service *io_service); virtual ~MockConnectionBase(); - typedef std::pair ProducerResult; + typedef std::pair ProducerResult; void async_read_some(const MutableBuffer &buf, - std::function handler) override { if (produced_.size() == 0) { ProducerResult r = Produce(); - if (r.first == asio::error::would_block) { + if (r.first == boost::asio::error::would_block) { return; // No more reads to do } if (r.first) { io_service_->post(std::bind(handler, r.first, 0)); return; } - asio::mutable_buffers_1 data = produced_.prepare(r.second.size()); - asio::buffer_copy(data, asio::buffer(r.second)); + boost::asio::mutable_buffers_1 data = produced_.prepare(r.second.size()); + boost::asio::buffer_copy(data, boost::asio::buffer(r.second)); produced_.commit(r.second.size()); } - size_t len = 
std::min(asio::buffer_size(buf), produced_.size()); - asio::buffer_copy(buf, produced_.data()); + size_t len = std::min(boost::asio::buffer_size(buf), produced_.size()); + boost::asio::buffer_copy(buf, produced_.data()); produced_.consume(len); - io_service_->post(std::bind(handler, asio::error_code(), len)); + io_service_->post(std::bind(handler, boost::system::error_code(), len)); } void async_write_some(const ConstBuffer &buf, - std::function handler) override { // CompletionResult res = OnWrite(buf); - io_service_->post(std::bind(handler, asio::error_code(), asio::buffer_size(buf))); + io_service_->post(std::bind(handler, boost::system::error_code(), boost::asio::buffer_size(buf))); } template void async_connect(const Endpoint &, Callback &&handler) { - io_service_->post([handler]() { handler(::asio::error_code()); }); + io_service_->post([handler]() { handler(::boost::system::error_code()); }); } virtual void cancel() {} virtual void close() {} protected: ProducerResult Produce() override = 0; - ::asio::io_service *io_service_; + boost::asio::io_service *io_service_; private: - asio::streambuf produced_; + boost::asio::streambuf produced_; }; @@ -114,10 +114,10 @@ class SharedMockConnection : public MockConnectionBase { assert(data); if (!data->checkProducerForConnect) { - io_service_->post([handler]() { handler(::asio::error_code()); }); + io_service_->post([handler]() { handler(::boost::system::error_code()); }); } else { ProducerResult result = Produce(); - if (result.first == asio::error::would_block) { + if (result.first == boost::asio::error::would_block) { return; // Connect will hang } else { io_service_->post([handler, result]() { handler( result.first); }); diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/remote_block_reader_test.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/remote_block_reader_test.cc index 3997e64be5ce2..dfee686b60244 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/remote_block_reader_test.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/remote_block_reader_test.cc @@ -29,6 +29,9 @@ #include #include #include +#include +#include +#include #include @@ -44,9 +47,9 @@ using ::hadoop::hdfs::ReadOpChecksumInfoProto; using ::hadoop::hdfs::LocatedBlockProto; using ::hadoop::hdfs::LocatedBlocksProto; -using ::asio::buffer; -using ::asio::error_code; -using ::asio::mutable_buffers_1; +using boost::asio::buffer; +using boost::system::error_code; +using boost::asio::mutable_buffers_1; using ::testing::_; using ::testing::InvokeArgument; using ::testing::Return; @@ -60,7 +63,7 @@ namespace hdfs { class MockDNConnection : public MockConnectionBase, public DataNodeConnection{ public: - MockDNConnection(::asio::io_service &io_service) + MockDNConnection(boost::asio::io_service &io_service) : MockConnectionBase(&io_service), OnRead([](){}) {} MOCK_METHOD0(Produce, ProducerResult()); @@ -70,14 +73,14 @@ class MockDNConnection : public MockConnectionBase, public DataNodeConnection{ std::function OnRead; void async_read_some(const MutableBuffer &buf, - std::function handler) override { this->OnRead(); this->MockConnectionBase::async_read_some(buf, handler); } void async_write_some(const ConstBuffer &buf, - std::function handler) override { this->MockConnectionBase::async_write_some(buf, handler); } @@ -96,7 +99,7 @@ class PartialMockReader : public BlockReaderImpl { MOCK_METHOD2( AsyncReadPacket, - void(const 
asio::mutable_buffers_1 &, + void(const boost::asio::mutable_buffers_1 &, const std::function &)); MOCK_METHOD5(AsyncRequestBlock, @@ -163,7 +166,7 @@ TEST(RemoteBlockReaderTest, TestReadSingleTrunk) { .WillOnce(InvokeArgument<1>(Status::OK(), sizeof(buf))); reader.AsyncReadBlock( - GetRandomClientName(), block, 0, asio::buffer(buf, sizeof(buf)), + GetRandomClientName(), block, 0, boost::asio::buffer(buf, sizeof(buf)), [&stat, &read](const Status &status, size_t transferred) { stat = status; read = transferred; @@ -190,7 +193,7 @@ TEST(RemoteBlockReaderTest, TestReadMultipleTrunk) { .WillRepeatedly(InvokeArgument<1>(Status::OK(), sizeof(buf) / 4)); reader.AsyncReadBlock( - GetRandomClientName(), block, 0, asio::buffer(buf, sizeof(buf)), + GetRandomClientName(), block, 0, boost::asio::buffer(buf, sizeof(buf)), [&stat, &read](const Status &status, size_t transferred) { stat = status; read = transferred; @@ -218,7 +221,7 @@ TEST(RemoteBlockReaderTest, TestReadError) { .WillOnce(InvokeArgument<1>(Status::Error("error"), 0)); reader.AsyncReadBlock( - GetRandomClientName(), block, 0, asio::buffer(buf, sizeof(buf)), + GetRandomClientName(), block, 0, boost::asio::buffer(buf, sizeof(buf)), [&stat, &read](const Status &status, size_t transferred) { stat = status; read = transferred; @@ -250,7 +253,7 @@ ReadContent(std::shared_ptr conn, const ExtendedBlockProto &block, TEST(RemoteBlockReaderTest, TestReadWholeBlock) { static const size_t kChunkSize = 512; static const string kChunkData(kChunkSize, 'a'); - ::asio::io_service io_service; + boost::asio::io_service io_service; auto conn = std::make_shared(io_service); BlockOpResponseProto block_op_resp; @@ -287,7 +290,7 @@ TEST(RemoteBlockReaderTest, TestCancelWhileReceiving) { static const size_t kChunkSize = 512; static const string kChunkData(kChunkSize, 'a'); - ::asio::io_service io_service; + boost::asio::io_service io_service; auto conn = std::make_shared(io_service); BlockOpResponseProto block_op_resp; @@ -338,7 +341,7 @@ TEST(RemoteBlockReaderTest, TestReadWithinChunk) { static const size_t kOffset = kChunkSize / 4; static const string kChunkData = string(kOffset, 'a') + string(kLength, 'b'); - ::asio::io_service io_service; + boost::asio::io_service io_service; auto conn = std::make_shared(io_service); BlockOpResponseProto block_op_resp; ReadOpChecksumInfoProto *checksum_info = @@ -378,7 +381,7 @@ TEST(RemoteBlockReaderTest, TestReadMultiplePacket) { static const size_t kChunkSize = 1024; static const string kChunkData(kChunkSize, 'a'); - ::asio::io_service io_service; + boost::asio::io_service io_service; auto conn = std::make_shared(io_service); BlockOpResponseProto block_op_resp; block_op_resp.set_status(::hadoop::hdfs::Status::SUCCESS); @@ -428,7 +431,7 @@ TEST(RemoteBlockReaderTest, TestReadCancelBetweenPackets) { static const size_t kChunkSize = 1024; static const string kChunkData(kChunkSize, 'a'); - ::asio::io_service io_service; + boost::asio::io_service io_service; auto conn = std::make_shared(io_service); BlockOpResponseProto block_op_resp; block_op_resp.set_status(::hadoop::hdfs::Status::SUCCESS); @@ -482,7 +485,7 @@ TEST(RemoteBlockReaderTest, TestSaslConnection) { static const string kAuthPayload = "realm=\"0\",nonce=\"+GAWc+O6yEAWpew/" "qKah8qh4QZLoOLCDcTtEKhlS\",qop=\"auth\"," "charset=utf-8,algorithm=md5-sess"; - ::asio::io_service io_service; + boost::asio::io_service io_service; auto conn = std::make_shared(io_service); BlockOpResponseProto block_op_resp; block_op_resp.set_status(::hadoop::hdfs::Status::SUCCESS); diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/rpc_engine_test.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/rpc_engine_test.cc index 6bbe7259ad853..744e7eba16d8e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/rpc_engine_test.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/rpc_engine_test.cc @@ -26,6 +26,8 @@ #include #include +#include +#include using ::hadoop::common::RpcResponseHeaderProto; using ::hadoop::common::EmptyRequestProto; @@ -33,8 +35,6 @@ using ::hadoop::common::EmptyResponseProto; using ::hadoop::common::EchoRequestProto; using ::hadoop::common::EchoResponseProto; -using ::asio::error_code; - using ::testing::Return; using ::std::make_pair; @@ -47,20 +47,20 @@ namespace hdfs { std::vector make_endpoint() { ResolvedNamenodeInfo result; - result.endpoints.push_back(asio::ip::basic_endpoint()); + result.endpoints.push_back(boost::asio::ip::basic_endpoint()); return std::vector({result}); } class MockRPCConnection : public MockConnectionBase { public: - MockRPCConnection(::asio::io_service &io_service) + MockRPCConnection(boost::asio::io_service &io_service) : MockConnectionBase(&io_service) {} MOCK_METHOD0(Produce, ProducerResult()); }; class SharedMockRPCConnection : public SharedMockConnection { public: - SharedMockRPCConnection(::asio::io_service &io_service) + SharedMockRPCConnection(boost::asio::io_service &io_service) : SharedMockConnection(&io_service) {} }; @@ -79,9 +79,9 @@ class SharedConnectionEngine : public RpcEngine { } -static inline std::pair RpcResponse( +static inline std::pair RpcResponse( const RpcResponseHeaderProto &h, const std::string &data, - const ::asio::error_code &ec = error_code()) { + const boost::system::error_code &ec = boost::system::error_code()) { uint32_t payload_length = pbio::CodedOutputStream::VarintSize32(h.ByteSize()) + pbio::CodedOutputStream::VarintSize32(data.size()) + h.ByteSize() + @@ -157,7 +157,7 @@ TEST(RpcEngineTest, TestConnectionResetAndFail) { h.set_status(RpcResponseHeaderProto::SUCCESS); EXPECT_CALL(conn->TEST_get_mutable_socket(), Produce()) .WillOnce(Return(RpcResponse( - h, "", make_error_code(::asio::error::connection_reset)))); + h, "", make_error_code(boost::asio::error::connection_reset)))); std::shared_ptr conn_ptr(conn); engine->TEST_SetRpcConnection(conn_ptr); @@ -200,7 +200,7 @@ TEST(RpcEngineTest, TestConnectionResetAndRecover) { h.set_status(RpcResponseHeaderProto::SUCCESS); EXPECT_CALL(*producer, Produce()) .WillOnce(Return(RpcResponse( - h, "", make_error_code(::asio::error::connection_reset)))) + h, "", make_error_code(boost::asio::error::connection_reset)))) .WillOnce(Return(RpcResponse(h, server_resp.SerializeAsString()))); SharedMockConnection::SetSharedConnectionData(producer); @@ -240,7 +240,7 @@ TEST(RpcEngineTest, TestConnectionResetAndRecoverWithDelay) { h.set_status(RpcResponseHeaderProto::SUCCESS); EXPECT_CALL(*producer, Produce()) .WillOnce(Return(RpcResponse( - h, "", make_error_code(::asio::error::connection_reset)))) + h, "", make_error_code(boost::asio::error::connection_reset)))) .WillOnce(Return(RpcResponse(h, server_resp.SerializeAsString()))); SharedMockConnection::SetSharedConnectionData(producer); @@ -254,9 +254,9 @@ TEST(RpcEngineTest, TestConnectionResetAndRecoverWithDelay) { ASSERT_TRUE(stat.ok()); }); - ::asio::deadline_timer timer(io_service->GetRaw()); - timer.expires_from_now(std::chrono::hours(100)); - timer.async_wait([](const 
asio::error_code & err){(void)err; ASSERT_FALSE("Timed out"); }); + boost::asio::deadline_timer timer(io_service->GetRaw()); + timer.expires_from_now(boost::posix_time::hours(100)); + timer.async_wait([](const boost::system::error_code & err){(void)err; ASSERT_FALSE("Timed out"); }); io_service->Run(); ASSERT_TRUE(complete); @@ -279,7 +279,7 @@ TEST(RpcEngineTest, TestConnectionFailure) std::shared_ptr engine = std::make_shared(io_service, options, "foo", "", "protocol", 1); EXPECT_CALL(*producer, Produce()) - .WillOnce(Return(std::make_pair(make_error_code(::asio::error::connection_reset), ""))); + .WillOnce(Return(std::make_pair(make_error_code(boost::asio::error::connection_reset), ""))); engine->Connect("", make_endpoint(), [&complete, io_service](const Status &stat) { complete = true; @@ -306,9 +306,9 @@ TEST(RpcEngineTest, TestConnectionFailureRetryAndFailure) std::shared_ptr engine = std::make_shared(io_service, options, "foo", "", "protocol", 1); EXPECT_CALL(*producer, Produce()) - .WillOnce(Return(std::make_pair(make_error_code(::asio::error::connection_reset), ""))) - .WillOnce(Return(std::make_pair(make_error_code(::asio::error::connection_reset), ""))) - .WillOnce(Return(std::make_pair(make_error_code(::asio::error::connection_reset), ""))); + .WillOnce(Return(std::make_pair(make_error_code(boost::asio::error::connection_reset), ""))) + .WillOnce(Return(std::make_pair(make_error_code(boost::asio::error::connection_reset), ""))) + .WillOnce(Return(std::make_pair(make_error_code(boost::asio::error::connection_reset), ""))); engine->Connect("", make_endpoint(), [&complete, io_service](const Status &stat) { complete = true; @@ -335,9 +335,9 @@ TEST(RpcEngineTest, TestConnectionFailureAndRecover) std::shared_ptr engine = std::make_shared(io_service, options, "foo", "", "protocol", 1); EXPECT_CALL(*producer, Produce()) - .WillOnce(Return(std::make_pair(make_error_code(::asio::error::connection_reset), ""))) - .WillOnce(Return(std::make_pair(::asio::error_code(), ""))) - .WillOnce(Return(std::make_pair(::asio::error::would_block, ""))); + .WillOnce(Return(std::make_pair(make_error_code(boost::asio::error::connection_reset), ""))) + .WillOnce(Return(std::make_pair(boost::system::error_code(), ""))) + .WillOnce(Return(std::make_pair(boost::asio::error::would_block, ""))); engine->Connect("", make_endpoint(), [&complete, io_service](const Status &stat) { complete = true; @@ -390,8 +390,8 @@ TEST(RpcEngineTest, TestEventCallbacks) h.set_callid(1); h.set_status(RpcResponseHeaderProto::SUCCESS); EXPECT_CALL(*producer, Produce()) - .WillOnce(Return(std::make_pair(::asio::error_code(), ""))) // subverted by callback - .WillOnce(Return(std::make_pair(::asio::error_code(), ""))) + .WillOnce(Return(std::make_pair(boost::system::error_code(), ""))) // subverted by callback + .WillOnce(Return(std::make_pair(boost::system::error_code(), ""))) .WillOnce(Return(RpcResponse(h, "b"))) // subverted by callback .WillOnce(Return(RpcResponse(h, server_resp.SerializeAsString()))); SharedMockConnection::SetSharedConnectionData(producer); @@ -444,9 +444,9 @@ TEST(RpcEngineTest, TestConnectionFailureAndAsyncRecover) std::shared_ptr engine = std::make_shared(io_service, options, "foo", "", "protocol", 1); EXPECT_CALL(*producer, Produce()) - .WillOnce(Return(std::make_pair(make_error_code(::asio::error::connection_reset), ""))) - .WillOnce(Return(std::make_pair(::asio::error_code(), ""))) - .WillOnce(Return(std::make_pair(::asio::error::would_block, ""))); + 
.WillOnce(Return(std::make_pair(make_error_code(boost::asio::error::connection_reset), ""))) + .WillOnce(Return(std::make_pair(boost::system::error_code(), ""))) + .WillOnce(Return(std::make_pair(boost::asio::error::would_block, ""))); engine->Connect("", make_endpoint(), [&complete, io_service](const Status &stat) { complete = true; @@ -454,9 +454,9 @@ TEST(RpcEngineTest, TestConnectionFailureAndAsyncRecover) ASSERT_TRUE(stat.ok()); }); - ::asio::deadline_timer timer(io_service->GetRaw()); - timer.expires_from_now(std::chrono::hours(100)); - timer.async_wait([](const asio::error_code & err){(void)err; ASSERT_FALSE("Timed out"); }); + boost::asio::deadline_timer timer(io_service->GetRaw()); + timer.expires_from_now(boost::posix_time::hours(100)); + timer.async_wait([](const boost::system::error_code & err){(void)err; ASSERT_FALSE("Timed out"); }); io_service->Run(); ASSERT_TRUE(complete); @@ -473,7 +473,7 @@ TEST(RpcEngineTest, TestTimeout) { conn->StartReading(); EXPECT_CALL(conn->TEST_get_mutable_socket(), Produce()) - .WillOnce(Return(std::make_pair(::asio::error::would_block, ""))); + .WillOnce(Return(std::make_pair(boost::asio::error::would_block, ""))); std::shared_ptr conn_ptr(conn); engine->TEST_SetRpcConnection(conn_ptr); @@ -489,9 +489,9 @@ TEST(RpcEngineTest, TestTimeout) { ASSERT_FALSE(stat.ok()); }); - ::asio::deadline_timer timer(io_service->GetRaw()); - timer.expires_from_now(std::chrono::hours(100)); - timer.async_wait([](const asio::error_code & err){(void)err; ASSERT_FALSE("Timed out"); }); + boost::asio::deadline_timer timer(io_service->GetRaw()); + timer.expires_from_now(boost::posix_time::hours(100)); + timer.async_wait([](const boost::system::error_code & err){(void)err; ASSERT_FALSE("Timed out"); }); io_service->Run(); ASSERT_TRUE(complete); diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/COPYING b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/COPYING deleted file mode 100644 index e86a3819fc51e..0000000000000 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/COPYING +++ /dev/null @@ -1,4 +0,0 @@ -Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com) - -Distributed under the Boost Software License, Version 1.0. (See accompanying -file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio.hpp b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio.hpp deleted file mode 100644 index 1f478409c2aac..0000000000000 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio.hpp +++ /dev/null @@ -1,122 +0,0 @@ -// -// asio.hpp -// ~~~~~~~~ -// -// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com) -// -// Distributed under the Boost Software License, Version 1.0. 
(See accompanying -// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -// - -#ifndef ASIO_HPP -#define ASIO_HPP - -#if defined(_MSC_VER) && (_MSC_VER >= 1200) -# pragma once -#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) - -#include "asio/async_result.hpp" -#include "asio/basic_datagram_socket.hpp" -#include "asio/basic_deadline_timer.hpp" -#include "asio/basic_io_object.hpp" -#include "asio/basic_raw_socket.hpp" -#include "asio/basic_seq_packet_socket.hpp" -#include "asio/basic_serial_port.hpp" -#include "asio/basic_signal_set.hpp" -#include "asio/basic_socket_acceptor.hpp" -#include "asio/basic_socket_iostream.hpp" -#include "asio/basic_socket_streambuf.hpp" -#include "asio/basic_stream_socket.hpp" -#include "asio/basic_streambuf.hpp" -#include "asio/basic_waitable_timer.hpp" -#include "asio/buffer.hpp" -#include "asio/buffered_read_stream_fwd.hpp" -#include "asio/buffered_read_stream.hpp" -#include "asio/buffered_stream_fwd.hpp" -#include "asio/buffered_stream.hpp" -#include "asio/buffered_write_stream_fwd.hpp" -#include "asio/buffered_write_stream.hpp" -#include "asio/buffers_iterator.hpp" -#include "asio/completion_condition.hpp" -#include "asio/connect.hpp" -#include "asio/coroutine.hpp" -#include "asio/datagram_socket_service.hpp" -#include "asio/deadline_timer_service.hpp" -#include "asio/deadline_timer.hpp" -#include "asio/error.hpp" -#include "asio/error_code.hpp" -#include "asio/generic/basic_endpoint.hpp" -#include "asio/generic/datagram_protocol.hpp" -#include "asio/generic/raw_protocol.hpp" -#include "asio/generic/seq_packet_protocol.hpp" -#include "asio/generic/stream_protocol.hpp" -#include "asio/handler_alloc_hook.hpp" -#include "asio/handler_continuation_hook.hpp" -#include "asio/handler_invoke_hook.hpp" -#include "asio/handler_type.hpp" -#include "asio/io_service.hpp" -#include "asio/ip/address.hpp" -#include "asio/ip/address_v4.hpp" -#include "asio/ip/address_v6.hpp" -#include "asio/ip/basic_endpoint.hpp" -#include "asio/ip/basic_resolver.hpp" -#include "asio/ip/basic_resolver_entry.hpp" -#include "asio/ip/basic_resolver_iterator.hpp" -#include "asio/ip/basic_resolver_query.hpp" -#include "asio/ip/host_name.hpp" -#include "asio/ip/icmp.hpp" -#include "asio/ip/multicast.hpp" -#include "asio/ip/resolver_query_base.hpp" -#include "asio/ip/resolver_service.hpp" -#include "asio/ip/tcp.hpp" -#include "asio/ip/udp.hpp" -#include "asio/ip/unicast.hpp" -#include "asio/ip/v6_only.hpp" -#include "asio/is_read_buffered.hpp" -#include "asio/is_write_buffered.hpp" -#include "asio/local/basic_endpoint.hpp" -#include "asio/local/connect_pair.hpp" -#include "asio/local/datagram_protocol.hpp" -#include "asio/local/stream_protocol.hpp" -#include "asio/placeholders.hpp" -#include "asio/posix/basic_descriptor.hpp" -#include "asio/posix/basic_stream_descriptor.hpp" -#include "asio/posix/descriptor_base.hpp" -#include "asio/posix/stream_descriptor.hpp" -#include "asio/posix/stream_descriptor_service.hpp" -#include "asio/raw_socket_service.hpp" -#include "asio/read.hpp" -#include "asio/read_at.hpp" -#include "asio/read_until.hpp" -#include "asio/seq_packet_socket_service.hpp" -#include "asio/serial_port.hpp" -#include "asio/serial_port_base.hpp" -#include "asio/serial_port_service.hpp" -#include "asio/signal_set.hpp" -#include "asio/signal_set_service.hpp" -#include "asio/socket_acceptor_service.hpp" -#include "asio/socket_base.hpp" -#include "asio/strand.hpp" -#include "asio/stream_socket_service.hpp" -#include "asio/streambuf.hpp" -#include "asio/system_error.hpp" 
-#include "asio/thread.hpp" -#include "asio/time_traits.hpp" -#include "asio/version.hpp" -#include "asio/wait_traits.hpp" -#include "asio/waitable_timer_service.hpp" -#include "asio/windows/basic_handle.hpp" -#include "asio/windows/basic_object_handle.hpp" -#include "asio/windows/basic_random_access_handle.hpp" -#include "asio/windows/basic_stream_handle.hpp" -#include "asio/windows/object_handle.hpp" -#include "asio/windows/object_handle_service.hpp" -#include "asio/windows/overlapped_ptr.hpp" -#include "asio/windows/random_access_handle.hpp" -#include "asio/windows/random_access_handle_service.hpp" -#include "asio/windows/stream_handle.hpp" -#include "asio/windows/stream_handle_service.hpp" -#include "asio/write.hpp" -#include "asio/write_at.hpp" - -#endif // ASIO_HPP diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/async_result.hpp b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/async_result.hpp deleted file mode 100644 index b98d7703385fe..0000000000000 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/async_result.hpp +++ /dev/null @@ -1,94 +0,0 @@ -// -// async_result.hpp -// ~~~~~~~~~~~~~~~~ -// -// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com) -// -// Distributed under the Boost Software License, Version 1.0. (See accompanying -// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -// - -#ifndef ASIO_ASYNC_RESULT_HPP -#define ASIO_ASYNC_RESULT_HPP - -#if defined(_MSC_VER) && (_MSC_VER >= 1200) -# pragma once -#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) - -#include "asio/detail/config.hpp" -#include "asio/handler_type.hpp" - -#include "asio/detail/push_options.hpp" - -namespace asio { - -/// An interface for customising the behaviour of an initiating function. -/** - * This template may be specialised for user-defined handler types. - */ -template -class async_result -{ -public: - /// The return type of the initiating function. - typedef void type; - - /// Construct an async result from a given handler. - /** - * When using a specalised async_result, the constructor has an opportunity - * to initialise some state associated with the handler, which is then - * returned from the initiating function. - */ - explicit async_result(Handler&) - { - } - - /// Obtain the value to be returned from the initiating function. - type get() - { - } -}; - -namespace detail { - -// Helper template to deduce the true type of a handler, capture a local copy -// of the handler, and then create an async_result for the handler. 
-template -struct async_result_init -{ - explicit async_result_init(ASIO_MOVE_ARG(Handler) orig_handler) - : handler(ASIO_MOVE_CAST(Handler)(orig_handler)), - result(handler) - { - } - - typename handler_type::type handler; - async_result::type> result; -}; - -template -struct async_result_type_helper -{ - typedef typename async_result< - typename handler_type::type - >::type type; -}; - -} // namespace detail -} // namespace asio - -#include "asio/detail/pop_options.hpp" - -#if defined(GENERATING_DOCUMENTATION) -# define ASIO_INITFN_RESULT_TYPE(h, sig) \ - void_or_deduced -#elif defined(_MSC_VER) && (_MSC_VER < 1500) -# define ASIO_INITFN_RESULT_TYPE(h, sig) \ - typename ::asio::detail::async_result_type_helper::type -#else -# define ASIO_INITFN_RESULT_TYPE(h, sig) \ - typename ::asio::async_result< \ - typename ::asio::handler_type::type>::type -#endif - -#endif // ASIO_ASYNC_RESULT_HPP diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_datagram_socket.hpp b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_datagram_socket.hpp deleted file mode 100644 index a1356b90fba57..0000000000000 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_datagram_socket.hpp +++ /dev/null @@ -1,949 +0,0 @@ -// -// basic_datagram_socket.hpp -// ~~~~~~~~~~~~~~~~~~~~~~~~~ -// -// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com) -// -// Distributed under the Boost Software License, Version 1.0. (See accompanying -// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -// - -#ifndef ASIO_BASIC_DATAGRAM_SOCKET_HPP -#define ASIO_BASIC_DATAGRAM_SOCKET_HPP - -#if defined(_MSC_VER) && (_MSC_VER >= 1200) -# pragma once -#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) - -#include "asio/detail/config.hpp" -#include -#include "asio/basic_socket.hpp" -#include "asio/datagram_socket_service.hpp" -#include "asio/detail/handler_type_requirements.hpp" -#include "asio/detail/throw_error.hpp" -#include "asio/detail/type_traits.hpp" -#include "asio/error.hpp" - -#include "asio/detail/push_options.hpp" - -namespace asio { - -/// Provides datagram-oriented socket functionality. -/** - * The basic_datagram_socket class template provides asynchronous and blocking - * datagram-oriented socket functionality. - * - * @par Thread Safety - * @e Distinct @e objects: Safe.@n - * @e Shared @e objects: Unsafe. - */ -template > -class basic_datagram_socket - : public basic_socket -{ -public: - /// (Deprecated: Use native_handle_type.) The native representation of a - /// socket. - typedef typename DatagramSocketService::native_handle_type native_type; - - /// The native representation of a socket. - typedef typename DatagramSocketService::native_handle_type native_handle_type; - - /// The protocol type. - typedef Protocol protocol_type; - - /// The endpoint type. - typedef typename Protocol::endpoint endpoint_type; - - /// Construct a basic_datagram_socket without opening it. - /** - * This constructor creates a datagram socket without opening it. The open() - * function must be called before data can be sent or received on the socket. - * - * @param io_service The io_service object that the datagram socket will use - * to dispatch handlers for any asynchronous operations performed on the - * socket. 
- */ - explicit basic_datagram_socket(asio::io_service& io_service) - : basic_socket(io_service) - { - } - - /// Construct and open a basic_datagram_socket. - /** - * This constructor creates and opens a datagram socket. - * - * @param io_service The io_service object that the datagram socket will use - * to dispatch handlers for any asynchronous operations performed on the - * socket. - * - * @param protocol An object specifying protocol parameters to be used. - * - * @throws asio::system_error Thrown on failure. - */ - basic_datagram_socket(asio::io_service& io_service, - const protocol_type& protocol) - : basic_socket(io_service, protocol) - { - } - - /// Construct a basic_datagram_socket, opening it and binding it to the given - /// local endpoint. - /** - * This constructor creates a datagram socket and automatically opens it bound - * to the specified endpoint on the local machine. The protocol used is the - * protocol associated with the given endpoint. - * - * @param io_service The io_service object that the datagram socket will use - * to dispatch handlers for any asynchronous operations performed on the - * socket. - * - * @param endpoint An endpoint on the local machine to which the datagram - * socket will be bound. - * - * @throws asio::system_error Thrown on failure. - */ - basic_datagram_socket(asio::io_service& io_service, - const endpoint_type& endpoint) - : basic_socket(io_service, endpoint) - { - } - - /// Construct a basic_datagram_socket on an existing native socket. - /** - * This constructor creates a datagram socket object to hold an existing - * native socket. - * - * @param io_service The io_service object that the datagram socket will use - * to dispatch handlers for any asynchronous operations performed on the - * socket. - * - * @param protocol An object specifying protocol parameters to be used. - * - * @param native_socket The new underlying socket implementation. - * - * @throws asio::system_error Thrown on failure. - */ - basic_datagram_socket(asio::io_service& io_service, - const protocol_type& protocol, const native_handle_type& native_socket) - : basic_socket( - io_service, protocol, native_socket) - { - } - -#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) - /// Move-construct a basic_datagram_socket from another. - /** - * This constructor moves a datagram socket from one object to another. - * - * @param other The other basic_datagram_socket object from which the move - * will occur. - * - * @note Following the move, the moved-from object is in the same state as if - * constructed using the @c basic_datagram_socket(io_service&) constructor. - */ - basic_datagram_socket(basic_datagram_socket&& other) - : basic_socket( - ASIO_MOVE_CAST(basic_datagram_socket)(other)) - { - } - - /// Move-assign a basic_datagram_socket from another. - /** - * This assignment operator moves a datagram socket from one object to - * another. - * - * @param other The other basic_datagram_socket object from which the move - * will occur. - * - * @note Following the move, the moved-from object is in the same state as if - * constructed using the @c basic_datagram_socket(io_service&) constructor. - */ - basic_datagram_socket& operator=(basic_datagram_socket&& other) - { - basic_socket::operator=( - ASIO_MOVE_CAST(basic_datagram_socket)(other)); - return *this; - } - - /// Move-construct a basic_datagram_socket from a socket of another protocol - /// type. - /** - * This constructor moves a datagram socket from one object to another. 
- * - * @param other The other basic_datagram_socket object from which the move - * will occur. - * - * @note Following the move, the moved-from object is in the same state as if - * constructed using the @c basic_datagram_socket(io_service&) constructor. - */ - template - basic_datagram_socket( - basic_datagram_socket&& other, - typename enable_if::value>::type* = 0) - : basic_socket( - ASIO_MOVE_CAST2(basic_datagram_socket< - Protocol1, DatagramSocketService1>)(other)) - { - } - - /// Move-assign a basic_datagram_socket from a socket of another protocol - /// type. - /** - * This assignment operator moves a datagram socket from one object to - * another. - * - * @param other The other basic_datagram_socket object from which the move - * will occur. - * - * @note Following the move, the moved-from object is in the same state as if - * constructed using the @c basic_datagram_socket(io_service&) constructor. - */ - template - typename enable_if::value, - basic_datagram_socket>::type& operator=( - basic_datagram_socket&& other) - { - basic_socket::operator=( - ASIO_MOVE_CAST2(basic_datagram_socket< - Protocol1, DatagramSocketService1>)(other)); - return *this; - } -#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) - - /// Send some data on a connected socket. - /** - * This function is used to send data on the datagram socket. The function - * call will block until the data has been sent successfully or an error - * occurs. - * - * @param buffers One ore more data buffers to be sent on the socket. - * - * @returns The number of bytes sent. - * - * @throws asio::system_error Thrown on failure. - * - * @note The send operation can only be used with a connected socket. Use - * the send_to function to send data on an unconnected datagram socket. - * - * @par Example - * To send a single data buffer use the @ref buffer function as follows: - * @code socket.send(asio::buffer(data, size)); @endcode - * See the @ref buffer documentation for information on sending multiple - * buffers in one go, and how to use it with arrays, boost::array or - * std::vector. - */ - template - std::size_t send(const ConstBufferSequence& buffers) - { - asio::error_code ec; - std::size_t s = this->get_service().send( - this->get_implementation(), buffers, 0, ec); - asio::detail::throw_error(ec, "send"); - return s; - } - - /// Send some data on a connected socket. - /** - * This function is used to send data on the datagram socket. The function - * call will block until the data has been sent successfully or an error - * occurs. - * - * @param buffers One ore more data buffers to be sent on the socket. - * - * @param flags Flags specifying how the send call is to be made. - * - * @returns The number of bytes sent. - * - * @throws asio::system_error Thrown on failure. - * - * @note The send operation can only be used with a connected socket. Use - * the send_to function to send data on an unconnected datagram socket. - */ - template - std::size_t send(const ConstBufferSequence& buffers, - socket_base::message_flags flags) - { - asio::error_code ec; - std::size_t s = this->get_service().send( - this->get_implementation(), buffers, flags, ec); - asio::detail::throw_error(ec, "send"); - return s; - } - - /// Send some data on a connected socket. - /** - * This function is used to send data on the datagram socket. The function - * call will block until the data has been sent successfully or an error - * occurs. - * - * @param buffers One or more data buffers to be sent on the socket. 
- * - * @param flags Flags specifying how the send call is to be made. - * - * @param ec Set to indicate what error occurred, if any. - * - * @returns The number of bytes sent. - * - * @note The send operation can only be used with a connected socket. Use - * the send_to function to send data on an unconnected datagram socket. - */ - template - std::size_t send(const ConstBufferSequence& buffers, - socket_base::message_flags flags, asio::error_code& ec) - { - return this->get_service().send( - this->get_implementation(), buffers, flags, ec); - } - - /// Start an asynchronous send on a connected socket. - /** - * This function is used to asynchronously send data on the datagram socket. - * The function call always returns immediately. - * - * @param buffers One or more data buffers to be sent on the socket. Although - * the buffers object may be copied as necessary, ownership of the underlying - * memory blocks is retained by the caller, which must guarantee that they - * remain valid until the handler is called. - * - * @param handler The handler to be called when the send operation completes. - * Copies will be made of the handler as required. The function signature of - * the handler must be: - * @code void handler( - * const asio::error_code& error, // Result of operation. - * std::size_t bytes_transferred // Number of bytes sent. - * ); @endcode - * Regardless of whether the asynchronous operation completes immediately or - * not, the handler will not be invoked from within this function. Invocation - * of the handler will be performed in a manner equivalent to using - * asio::io_service::post(). - * - * @note The async_send operation can only be used with a connected socket. - * Use the async_send_to function to send data on an unconnected datagram - * socket. - * - * @par Example - * To send a single data buffer use the @ref buffer function as follows: - * @code - * socket.async_send(asio::buffer(data, size), handler); - * @endcode - * See the @ref buffer documentation for information on sending multiple - * buffers in one go, and how to use it with arrays, boost::array or - * std::vector. - */ - template - ASIO_INITFN_RESULT_TYPE(WriteHandler, - void (asio::error_code, std::size_t)) - async_send(const ConstBufferSequence& buffers, - ASIO_MOVE_ARG(WriteHandler) handler) - { - // If you get an error on the following line it means that your handler does - // not meet the documented type requirements for a WriteHandler. - ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; - - return this->get_service().async_send(this->get_implementation(), - buffers, 0, ASIO_MOVE_CAST(WriteHandler)(handler)); - } - - /// Start an asynchronous send on a connected socket. - /** - * This function is used to asynchronously send data on the datagram socket. - * The function call always returns immediately. - * - * @param buffers One or more data buffers to be sent on the socket. Although - * the buffers object may be copied as necessary, ownership of the underlying - * memory blocks is retained by the caller, which must guarantee that they - * remain valid until the handler is called. - * - * @param flags Flags specifying how the send call is to be made. - * - * @param handler The handler to be called when the send operation completes. - * Copies will be made of the handler as required. The function signature of - * the handler must be: - * @code void handler( - * const asio::error_code& error, // Result of operation. - * std::size_t bytes_transferred // Number of bytes sent. 
- * ); @endcode - * Regardless of whether the asynchronous operation completes immediately or - * not, the handler will not be invoked from within this function. Invocation - * of the handler will be performed in a manner equivalent to using - * asio::io_service::post(). - * - * @note The async_send operation can only be used with a connected socket. - * Use the async_send_to function to send data on an unconnected datagram - * socket. - */ - template - ASIO_INITFN_RESULT_TYPE(WriteHandler, - void (asio::error_code, std::size_t)) - async_send(const ConstBufferSequence& buffers, - socket_base::message_flags flags, - ASIO_MOVE_ARG(WriteHandler) handler) - { - // If you get an error on the following line it means that your handler does - // not meet the documented type requirements for a WriteHandler. - ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; - - return this->get_service().async_send(this->get_implementation(), - buffers, flags, ASIO_MOVE_CAST(WriteHandler)(handler)); - } - - /// Send a datagram to the specified endpoint. - /** - * This function is used to send a datagram to the specified remote endpoint. - * The function call will block until the data has been sent successfully or - * an error occurs. - * - * @param buffers One or more data buffers to be sent to the remote endpoint. - * - * @param destination The remote endpoint to which the data will be sent. - * - * @returns The number of bytes sent. - * - * @throws asio::system_error Thrown on failure. - * - * @par Example - * To send a single data buffer use the @ref buffer function as follows: - * @code - * asio::ip::udp::endpoint destination( - * asio::ip::address::from_string("1.2.3.4"), 12345); - * socket.send_to(asio::buffer(data, size), destination); - * @endcode - * See the @ref buffer documentation for information on sending multiple - * buffers in one go, and how to use it with arrays, boost::array or - * std::vector. - */ - template - std::size_t send_to(const ConstBufferSequence& buffers, - const endpoint_type& destination) - { - asio::error_code ec; - std::size_t s = this->get_service().send_to( - this->get_implementation(), buffers, destination, 0, ec); - asio::detail::throw_error(ec, "send_to"); - return s; - } - - /// Send a datagram to the specified endpoint. - /** - * This function is used to send a datagram to the specified remote endpoint. - * The function call will block until the data has been sent successfully or - * an error occurs. - * - * @param buffers One or more data buffers to be sent to the remote endpoint. - * - * @param destination The remote endpoint to which the data will be sent. - * - * @param flags Flags specifying how the send call is to be made. - * - * @returns The number of bytes sent. - * - * @throws asio::system_error Thrown on failure. - */ - template - std::size_t send_to(const ConstBufferSequence& buffers, - const endpoint_type& destination, socket_base::message_flags flags) - { - asio::error_code ec; - std::size_t s = this->get_service().send_to( - this->get_implementation(), buffers, destination, flags, ec); - asio::detail::throw_error(ec, "send_to"); - return s; - } - - /// Send a datagram to the specified endpoint. - /** - * This function is used to send a datagram to the specified remote endpoint. - * The function call will block until the data has been sent successfully or - * an error occurs. - * - * @param buffers One or more data buffers to be sent to the remote endpoint. - * - * @param destination The remote endpoint to which the data will be sent. 
- * - * @param flags Flags specifying how the send call is to be made. - * - * @param ec Set to indicate what error occurred, if any. - * - * @returns The number of bytes sent. - */ - template - std::size_t send_to(const ConstBufferSequence& buffers, - const endpoint_type& destination, socket_base::message_flags flags, - asio::error_code& ec) - { - return this->get_service().send_to(this->get_implementation(), - buffers, destination, flags, ec); - } - - /// Start an asynchronous send. - /** - * This function is used to asynchronously send a datagram to the specified - * remote endpoint. The function call always returns immediately. - * - * @param buffers One or more data buffers to be sent to the remote endpoint. - * Although the buffers object may be copied as necessary, ownership of the - * underlying memory blocks is retained by the caller, which must guarantee - * that they remain valid until the handler is called. - * - * @param destination The remote endpoint to which the data will be sent. - * Copies will be made of the endpoint as required. - * - * @param handler The handler to be called when the send operation completes. - * Copies will be made of the handler as required. The function signature of - * the handler must be: - * @code void handler( - * const asio::error_code& error, // Result of operation. - * std::size_t bytes_transferred // Number of bytes sent. - * ); @endcode - * Regardless of whether the asynchronous operation completes immediately or - * not, the handler will not be invoked from within this function. Invocation - * of the handler will be performed in a manner equivalent to using - * asio::io_service::post(). - * - * @par Example - * To send a single data buffer use the @ref buffer function as follows: - * @code - * asio::ip::udp::endpoint destination( - * asio::ip::address::from_string("1.2.3.4"), 12345); - * socket.async_send_to( - * asio::buffer(data, size), destination, handler); - * @endcode - * See the @ref buffer documentation for information on sending multiple - * buffers in one go, and how to use it with arrays, boost::array or - * std::vector. - */ - template - ASIO_INITFN_RESULT_TYPE(WriteHandler, - void (asio::error_code, std::size_t)) - async_send_to(const ConstBufferSequence& buffers, - const endpoint_type& destination, - ASIO_MOVE_ARG(WriteHandler) handler) - { - // If you get an error on the following line it means that your handler does - // not meet the documented type requirements for a WriteHandler. - ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; - - return this->get_service().async_send_to( - this->get_implementation(), buffers, destination, 0, - ASIO_MOVE_CAST(WriteHandler)(handler)); - } - - /// Start an asynchronous send. - /** - * This function is used to asynchronously send a datagram to the specified - * remote endpoint. The function call always returns immediately. - * - * @param buffers One or more data buffers to be sent to the remote endpoint. - * Although the buffers object may be copied as necessary, ownership of the - * underlying memory blocks is retained by the caller, which must guarantee - * that they remain valid until the handler is called. - * - * @param flags Flags specifying how the send call is to be made. - * - * @param destination The remote endpoint to which the data will be sent. - * Copies will be made of the endpoint as required. - * - * @param handler The handler to be called when the send operation completes. - * Copies will be made of the handler as required. 
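A minimal sketch of async_send_to, assuming standalone asio headers and C++11. The key point from the documentation above is that the endpoint is copied by the library but the buffer is not, so the message is kept as a member that outlives the operation:

  #include <asio.hpp>
  #include <string>

  class sender
  {
  public:
    sender(asio::io_service& io_service, const asio::ip::udp::endpoint& dest)
      : socket_(io_service, asio::ip::udp::v4()),
        message_("hello") // must stay alive until the handler runs
    {
      socket_.async_send_to(asio::buffer(message_), dest,
          [](const asio::error_code& ec, std::size_t /*bytes_sent*/)
          {
            // ec carries the result of the operation.
          });
    }

  private:
    asio::ip::udp::socket socket_;
    std::string message_;
  };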
The function signature of - * the handler must be: - * @code void handler( - * const asio::error_code& error, // Result of operation. - * std::size_t bytes_transferred // Number of bytes sent. - * ); @endcode - * Regardless of whether the asynchronous operation completes immediately or - * not, the handler will not be invoked from within this function. Invocation - * of the handler will be performed in a manner equivalent to using - * asio::io_service::post(). - */ - template - ASIO_INITFN_RESULT_TYPE(WriteHandler, - void (asio::error_code, std::size_t)) - async_send_to(const ConstBufferSequence& buffers, - const endpoint_type& destination, socket_base::message_flags flags, - ASIO_MOVE_ARG(WriteHandler) handler) - { - // If you get an error on the following line it means that your handler does - // not meet the documented type requirements for a WriteHandler. - ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; - - return this->get_service().async_send_to( - this->get_implementation(), buffers, destination, flags, - ASIO_MOVE_CAST(WriteHandler)(handler)); - } - - /// Receive some data on a connected socket. - /** - * This function is used to receive data on the datagram socket. The function - * call will block until data has been received successfully or an error - * occurs. - * - * @param buffers One or more buffers into which the data will be received. - * - * @returns The number of bytes received. - * - * @throws asio::system_error Thrown on failure. - * - * @note The receive operation can only be used with a connected socket. Use - * the receive_from function to receive data on an unconnected datagram - * socket. - * - * @par Example - * To receive into a single data buffer use the @ref buffer function as - * follows: - * @code socket.receive(asio::buffer(data, size)); @endcode - * See the @ref buffer documentation for information on receiving into - * multiple buffers in one go, and how to use it with arrays, boost::array or - * std::vector. - */ - template - std::size_t receive(const MutableBufferSequence& buffers) - { - asio::error_code ec; - std::size_t s = this->get_service().receive( - this->get_implementation(), buffers, 0, ec); - asio::detail::throw_error(ec, "receive"); - return s; - } - - /// Receive some data on a connected socket. - /** - * This function is used to receive data on the datagram socket. The function - * call will block until data has been received successfully or an error - * occurs. - * - * @param buffers One or more buffers into which the data will be received. - * - * @param flags Flags specifying how the receive call is to be made. - * - * @returns The number of bytes received. - * - * @throws asio::system_error Thrown on failure. - * - * @note The receive operation can only be used with a connected socket. Use - * the receive_from function to receive data on an unconnected datagram - * socket. - */ - template - std::size_t receive(const MutableBufferSequence& buffers, - socket_base::message_flags flags) - { - asio::error_code ec; - std::size_t s = this->get_service().receive( - this->get_implementation(), buffers, flags, ec); - asio::detail::throw_error(ec, "receive"); - return s; - } - - /// Receive some data on a connected socket. - /** - * This function is used to receive data on the datagram socket. The function - * call will block until data has been received successfully or an error - * occurs. - * - * @param buffers One or more buffers into which the data will be received. 
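Tying the connected-socket send and receive overloads together, a minimal sketch of a UDP request/reply exchange, assuming standalone asio headers and an illustrative server address:

  #include <asio.hpp>
  #include <iostream>

  int main()
  {
    asio::io_service io_service;
    asio::ip::udp::socket socket(io_service);

    // connect() fixes the peer so that send()/receive() may be used
    // instead of send_to()/receive_from().
    socket.connect(asio::ip::udp::endpoint(
        asio::ip::address::from_string("127.0.0.1"), 12345)); // illustrative

    char request[] = "time?";
    socket.send(asio::buffer(request, 5));

    char reply[128];
    asio::error_code ec;
    std::size_t n = socket.receive(asio::buffer(reply), 0, ec);
    if (!ec)
      std::cout.write(reply, n);
  }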
- * - * @param flags Flags specifying how the receive call is to be made. - * - * @param ec Set to indicate what error occurred, if any. - * - * @returns The number of bytes received. - * - * @note The receive operation can only be used with a connected socket. Use - * the receive_from function to receive data on an unconnected datagram - * socket. - */ - template - std::size_t receive(const MutableBufferSequence& buffers, - socket_base::message_flags flags, asio::error_code& ec) - { - return this->get_service().receive( - this->get_implementation(), buffers, flags, ec); - } - - /// Start an asynchronous receive on a connected socket. - /** - * This function is used to asynchronously receive data from the datagram - * socket. The function call always returns immediately. - * - * @param buffers One or more buffers into which the data will be received. - * Although the buffers object may be copied as necessary, ownership of the - * underlying memory blocks is retained by the caller, which must guarantee - * that they remain valid until the handler is called. - * - * @param handler The handler to be called when the receive operation - * completes. Copies will be made of the handler as required. The function - * signature of the handler must be: - * @code void handler( - * const asio::error_code& error, // Result of operation. - * std::size_t bytes_transferred // Number of bytes received. - * ); @endcode - * Regardless of whether the asynchronous operation completes immediately or - * not, the handler will not be invoked from within this function. Invocation - * of the handler will be performed in a manner equivalent to using - * asio::io_service::post(). - * - * @note The async_receive operation can only be used with a connected socket. - * Use the async_receive_from function to receive data on an unconnected - * datagram socket. - * - * @par Example - * To receive into a single data buffer use the @ref buffer function as - * follows: - * @code - * socket.async_receive(asio::buffer(data, size), handler); - * @endcode - * See the @ref buffer documentation for information on receiving into - * multiple buffers in one go, and how to use it with arrays, boost::array or - * std::vector. - */ - template - ASIO_INITFN_RESULT_TYPE(ReadHandler, - void (asio::error_code, std::size_t)) - async_receive(const MutableBufferSequence& buffers, - ASIO_MOVE_ARG(ReadHandler) handler) - { - // If you get an error on the following line it means that your handler does - // not meet the documented type requirements for a ReadHandler. - ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; - - return this->get_service().async_receive(this->get_implementation(), - buffers, 0, ASIO_MOVE_CAST(ReadHandler)(handler)); - } - - /// Start an asynchronous receive on a connected socket. - /** - * This function is used to asynchronously receive data from the datagram - * socket. The function call always returns immediately. - * - * @param buffers One or more buffers into which the data will be received. - * Although the buffers object may be copied as necessary, ownership of the - * underlying memory blocks is retained by the caller, which must guarantee - * that they remain valid until the handler is called. - * - * @param flags Flags specifying how the receive call is to be made. - * - * @param handler The handler to be called when the receive operation - * completes. Copies will be made of the handler as required. 
The function - * signature of the handler must be: - * @code void handler( - * const asio::error_code& error, // Result of operation. - * std::size_t bytes_transferred // Number of bytes received. - * ); @endcode - * Regardless of whether the asynchronous operation completes immediately or - * not, the handler will not be invoked from within this function. Invocation - * of the handler will be performed in a manner equivalent to using - * asio::io_service::post(). - * - * @note The async_receive operation can only be used with a connected socket. - * Use the async_receive_from function to receive data on an unconnected - * datagram socket. - */ - template - ASIO_INITFN_RESULT_TYPE(ReadHandler, - void (asio::error_code, std::size_t)) - async_receive(const MutableBufferSequence& buffers, - socket_base::message_flags flags, - ASIO_MOVE_ARG(ReadHandler) handler) - { - // If you get an error on the following line it means that your handler does - // not meet the documented type requirements for a ReadHandler. - ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; - - return this->get_service().async_receive(this->get_implementation(), - buffers, flags, ASIO_MOVE_CAST(ReadHandler)(handler)); - } - - /// Receive a datagram with the endpoint of the sender. - /** - * This function is used to receive a datagram. The function call will block - * until data has been received successfully or an error occurs. - * - * @param buffers One or more buffers into which the data will be received. - * - * @param sender_endpoint An endpoint object that receives the endpoint of - * the remote sender of the datagram. - * - * @returns The number of bytes received. - * - * @throws asio::system_error Thrown on failure. - * - * @par Example - * To receive into a single data buffer use the @ref buffer function as - * follows: - * @code - * asio::ip::udp::endpoint sender_endpoint; - * socket.receive_from( - * asio::buffer(data, size), sender_endpoint); - * @endcode - * See the @ref buffer documentation for information on receiving into - * multiple buffers in one go, and how to use it with arrays, boost::array or - * std::vector. - */ - template - std::size_t receive_from(const MutableBufferSequence& buffers, - endpoint_type& sender_endpoint) - { - asio::error_code ec; - std::size_t s = this->get_service().receive_from( - this->get_implementation(), buffers, sender_endpoint, 0, ec); - asio::detail::throw_error(ec, "receive_from"); - return s; - } - - /// Receive a datagram with the endpoint of the sender. - /** - * This function is used to receive a datagram. The function call will block - * until data has been received successfully or an error occurs. - * - * @param buffers One or more buffers into which the data will be received. - * - * @param sender_endpoint An endpoint object that receives the endpoint of - * the remote sender of the datagram. - * - * @param flags Flags specifying how the receive call is to be made. - * - * @returns The number of bytes received. - * - * @throws asio::system_error Thrown on failure. - */ - template - std::size_t receive_from(const MutableBufferSequence& buffers, - endpoint_type& sender_endpoint, socket_base::message_flags flags) - { - asio::error_code ec; - std::size_t s = this->get_service().receive_from( - this->get_implementation(), buffers, sender_endpoint, flags, ec); - asio::detail::throw_error(ec, "receive_from"); - return s; - } - - /// Receive a datagram with the endpoint of the sender. - /** - * This function is used to receive a datagram. 
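The receive_from/send_to pair is enough for a complete, if simplistic, blocking echo server. A minimal sketch, assuming standalone asio headers and an illustrative port:

  #include <asio.hpp>

  int main()
  {
    asio::io_service io_service;
    asio::ip::udp::socket socket(io_service,
        asio::ip::udp::endpoint(asio::ip::udp::v4(), 12345)); // illustrative

    for (;;)
    {
      char data[1500];
      asio::ip::udp::endpoint sender;
      std::size_t n = socket.receive_from(asio::buffer(data), sender);

      // Echo the datagram back to whoever sent it.
      socket.send_to(asio::buffer(data, n), sender);
    }
  }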
The function call will block - * until data has been received successfully or an error occurs. - * - * @param buffers One or more buffers into which the data will be received. - * - * @param sender_endpoint An endpoint object that receives the endpoint of - * the remote sender of the datagram. - * - * @param flags Flags specifying how the receive call is to be made. - * - * @param ec Set to indicate what error occurred, if any. - * - * @returns The number of bytes received. - */ - template - std::size_t receive_from(const MutableBufferSequence& buffers, - endpoint_type& sender_endpoint, socket_base::message_flags flags, - asio::error_code& ec) - { - return this->get_service().receive_from(this->get_implementation(), - buffers, sender_endpoint, flags, ec); - } - - /// Start an asynchronous receive. - /** - * This function is used to asynchronously receive a datagram. The function - * call always returns immediately. - * - * @param buffers One or more buffers into which the data will be received. - * Although the buffers object may be copied as necessary, ownership of the - * underlying memory blocks is retained by the caller, which must guarantee - * that they remain valid until the handler is called. - * - * @param sender_endpoint An endpoint object that receives the endpoint of - * the remote sender of the datagram. Ownership of the sender_endpoint object - * is retained by the caller, which must guarantee that it is valid until the - * handler is called. - * - * @param handler The handler to be called when the receive operation - * completes. Copies will be made of the handler as required. The function - * signature of the handler must be: - * @code void handler( - * const asio::error_code& error, // Result of operation. - * std::size_t bytes_transferred // Number of bytes received. - * ); @endcode - * Regardless of whether the asynchronous operation completes immediately or - * not, the handler will not be invoked from within this function. Invocation - * of the handler will be performed in a manner equivalent to using - * asio::io_service::post(). - * - * @par Example - * To receive into a single data buffer use the @ref buffer function as - * follows: - * @code socket.async_receive_from( - * asio::buffer(data, size), sender_endpoint, handler); @endcode - * See the @ref buffer documentation for information on receiving into - * multiple buffers in one go, and how to use it with arrays, boost::array or - * std::vector. - */ - template - ASIO_INITFN_RESULT_TYPE(ReadHandler, - void (asio::error_code, std::size_t)) - async_receive_from(const MutableBufferSequence& buffers, - endpoint_type& sender_endpoint, - ASIO_MOVE_ARG(ReadHandler) handler) - { - // If you get an error on the following line it means that your handler does - // not meet the documented type requirements for a ReadHandler. - ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; - - return this->get_service().async_receive_from( - this->get_implementation(), buffers, sender_endpoint, 0, - ASIO_MOVE_CAST(ReadHandler)(handler)); - } - - /// Start an asynchronous receive. - /** - * This function is used to asynchronously receive a datagram. The function - * call always returns immediately. - * - * @param buffers One or more buffers into which the data will be received. - * Although the buffers object may be copied as necessary, ownership of the - * underlying memory blocks is retained by the caller, which must guarantee - * that they remain valid until the handler is called. 
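The ownership caveat above is the one that most often bites: both the buffer and the sender_endpoint must outlive the asynchronous operation. A minimal sketch that keeps them as members, assuming standalone asio headers and C++11:

  #include <asio.hpp>

  class listener
  {
  public:
    explicit listener(asio::io_service& io_service)
      : socket_(io_service,
          asio::ip::udp::endpoint(asio::ip::udp::v4(), 12345)) // illustrative
    {
      start_receive();
    }

  private:
    void start_receive()
    {
      // buffer_ and sender_ are members precisely because they must
      // remain valid until the handler is invoked.
      socket_.async_receive_from(asio::buffer(buffer_), sender_,
          [this](const asio::error_code& ec, std::size_t n)
          {
            if (!ec)
            {
              // Use buffer_[0..n) and sender_ here.
            }
            start_receive(); // rearm for the next datagram
          });
    }

    asio::ip::udp::socket socket_;
    asio::ip::udp::endpoint sender_;
    char buffer_[1500];
  };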
- * - * @param sender_endpoint An endpoint object that receives the endpoint of - * the remote sender of the datagram. Ownership of the sender_endpoint object - * is retained by the caller, which must guarantee that it is valid until the - * handler is called. - * - * @param flags Flags specifying how the receive call is to be made. - * - * @param handler The handler to be called when the receive operation - * completes. Copies will be made of the handler as required. The function - * signature of the handler must be: - * @code void handler( - * const asio::error_code& error, // Result of operation. - * std::size_t bytes_transferred // Number of bytes received. - * ); @endcode - * Regardless of whether the asynchronous operation completes immediately or - * not, the handler will not be invoked from within this function. Invocation - * of the handler will be performed in a manner equivalent to using - * asio::io_service::post(). - */ - template - ASIO_INITFN_RESULT_TYPE(ReadHandler, - void (asio::error_code, std::size_t)) - async_receive_from(const MutableBufferSequence& buffers, - endpoint_type& sender_endpoint, socket_base::message_flags flags, - ASIO_MOVE_ARG(ReadHandler) handler) - { - // If you get an error on the following line it means that your handler does - // not meet the documented type requirements for a ReadHandler. - ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; - - return this->get_service().async_receive_from( - this->get_implementation(), buffers, sender_endpoint, flags, - ASIO_MOVE_CAST(ReadHandler)(handler)); - } -}; - -} // namespace asio - -#include "asio/detail/pop_options.hpp" - -#endif // ASIO_BASIC_DATAGRAM_SOCKET_HPP diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_deadline_timer.hpp b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_deadline_timer.hpp deleted file mode 100644 index d0fc371508075..0000000000000 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_deadline_timer.hpp +++ /dev/null @@ -1,520 +0,0 @@ -// -// basic_deadline_timer.hpp -// ~~~~~~~~~~~~~~~~~~~~~~~~ -// -// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com) -// -// Distributed under the Boost Software License, Version 1.0. (See accompanying -// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -// - -#ifndef ASIO_BASIC_DEADLINE_TIMER_HPP -#define ASIO_BASIC_DEADLINE_TIMER_HPP - -#if defined(_MSC_VER) && (_MSC_VER >= 1200) -# pragma once -#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) - -#include "asio/detail/config.hpp" - -#if defined(ASIO_HAS_BOOST_DATE_TIME) \ - || defined(ASIO_CPP11_DATE_TIME) \ - || defined(GENERATING_DOCUMENTATION) - -#include -#include "asio/basic_io_object.hpp" -#include "asio/deadline_timer_service.hpp" -#include "asio/detail/handler_type_requirements.hpp" -#include "asio/detail/throw_error.hpp" -#include "asio/error.hpp" - -#include "asio/detail/push_options.hpp" - -namespace asio { - -/// Provides waitable timer functionality. -/** - * The basic_deadline_timer class template provides the ability to perform a - * blocking or asynchronous wait for a timer to expire. - * - * A deadline timer is always in one of two states: "expired" or "not expired". - * If the wait() or async_wait() function is called on an expired timer, the - * wait operation will complete immediately. 
- *
- * Most applications will use the asio::deadline_timer typedef.
- *
- * @par Thread Safety
- * @e Distinct @e objects: Safe.@n
- * @e Shared @e objects: Unsafe.
- *
- * @par Examples
- * Performing a blocking wait:
- * @code
- * // Construct a timer without setting an expiry time.
- * asio::deadline_timer timer(io_service);
- *
- * // Set an expiry time relative to now.
- * timer.expires_from_now(boost::posix_time::seconds(5));
- *
- * // Wait for the timer to expire.
- * timer.wait();
- * @endcode
- *
- * @par
- * Performing an asynchronous wait:
- * @code
- * void handler(const asio::error_code& error)
- * {
- * if (!error)
- * {
- * // Timer expired.
- * }
- * }
- *
- * ...
- *
- * // Construct a timer with an absolute expiry time.
- * asio::deadline_timer timer(io_service,
- * boost::posix_time::time_from_string("2005-12-07 23:59:59.000"));
- *
- * // Start an asynchronous wait.
- * timer.async_wait(handler);
- * @endcode
- *
- * @par Changing an active deadline_timer's expiry time
- *
- * Changing the expiry time of a timer while there are pending asynchronous
- * waits causes those wait operations to be cancelled. To ensure that the action
- * associated with the timer is performed only once, use something like this:
- *
- * @code
- * void on_some_event()
- * {
- * if (my_timer.expires_from_now(seconds(5)) > 0)
- * {
- * // We managed to cancel the timer. Start new asynchronous wait.
- * my_timer.async_wait(on_timeout);
- * }
- * else
- * {
- * // Too late, timer has already expired!
- * }
- * }
- *
- * void on_timeout(const asio::error_code& e)
- * {
- * if (e != asio::error::operation_aborted)
- * {
- * // Timer was not cancelled, take necessary action.
- * }
- * }
- * @endcode
- *
- * @li The asio::basic_deadline_timer::expires_from_now() function
- * cancels any pending asynchronous waits, and returns the number of
- * asynchronous waits that were cancelled. If it returns 0 then you were too
- * late and the wait handler has already been executed, or will soon be
- * executed. If it returns 1 then the wait handler was successfully cancelled.
- *
- * @li If a wait handler is cancelled, the asio::error_code passed to
- * it contains the value asio::error::operation_aborted.
- */
-template <typename TimeTraits = asio::time_traits<boost::posix_time::ptime>,
- typename TimerService = deadline_timer_service<TimeTraits> >
-class basic_deadline_timer
- : public basic_io_object<TimerService>
-{
-public:
- /// The time traits type.
- typedef TimeTraits traits_type;
-
- /// The time type.
- typedef typename traits_type::time_type time_type;
-
- /// The duration type.
- typedef typename traits_type::duration_type duration_type;
-
- /// Constructor.
- /**
- * This constructor creates a timer without setting an expiry time. The
- * expires_at() or expires_from_now() functions must be called to set an
- * expiry time before the timer can be waited on.
- *
- * @param io_service The io_service object that the timer will use to dispatch
- * handlers for any asynchronous operations performed on the timer.
- */
- explicit basic_deadline_timer(asio::io_service& io_service)
- : basic_io_object<TimerService>(io_service)
- {
- }
-
- /// Constructor to set a particular expiry time as an absolute time.
- /**
- * This constructor creates a timer and sets the expiry time.
- *
- * @param io_service The io_service object that the timer will use to dispatch
- * handlers for any asynchronous operations performed on the timer.
- *
- * @param expiry_time The expiry time to be used for the timer, expressed
- * as an absolute time.
- */ - basic_deadline_timer(asio::io_service& io_service, - const time_type& expiry_time) - : basic_io_object(io_service) - { - asio::error_code ec; - this->service.expires_at(this->implementation, expiry_time, ec); - asio::detail::throw_error(ec, "expires_at"); - } - - /// Constructor to set a particular expiry time relative to now. - /** - * This constructor creates a timer and sets the expiry time. - * - * @param io_service The io_service object that the timer will use to dispatch - * handlers for any asynchronous operations performed on the timer. - * - * @param expiry_time The expiry time to be used for the timer, relative to - * now. - */ - basic_deadline_timer(asio::io_service& io_service, - const duration_type& expiry_time) - : basic_io_object(io_service) - { - asio::error_code ec; - this->service.expires_from_now(this->implementation, expiry_time, ec); - asio::detail::throw_error(ec, "expires_from_now"); - } - - /// Cancel any asynchronous operations that are waiting on the timer. - /** - * This function forces the completion of any pending asynchronous wait - * operations against the timer. The handler for each cancelled operation will - * be invoked with the asio::error::operation_aborted error code. - * - * Cancelling the timer does not change the expiry time. - * - * @return The number of asynchronous operations that were cancelled. - * - * @throws asio::system_error Thrown on failure. - * - * @note If the timer has already expired when cancel() is called, then the - * handlers for asynchronous wait operations will: - * - * @li have already been invoked; or - * - * @li have been queued for invocation in the near future. - * - * These handlers can no longer be cancelled, and therefore are passed an - * error code that indicates the successful completion of the wait operation. - */ - std::size_t cancel() - { - asio::error_code ec; - std::size_t s = this->service.cancel(this->implementation, ec); - asio::detail::throw_error(ec, "cancel"); - return s; - } - - /// Cancel any asynchronous operations that are waiting on the timer. - /** - * This function forces the completion of any pending asynchronous wait - * operations against the timer. The handler for each cancelled operation will - * be invoked with the asio::error::operation_aborted error code. - * - * Cancelling the timer does not change the expiry time. - * - * @param ec Set to indicate what error occurred, if any. - * - * @return The number of asynchronous operations that were cancelled. - * - * @note If the timer has already expired when cancel() is called, then the - * handlers for asynchronous wait operations will: - * - * @li have already been invoked; or - * - * @li have been queued for invocation in the near future. - * - * These handlers can no longer be cancelled, and therefore are passed an - * error code that indicates the successful completion of the wait operation. - */ - std::size_t cancel(asio::error_code& ec) - { - return this->service.cancel(this->implementation, ec); - } - - /// Cancels one asynchronous operation that is waiting on the timer. - /** - * This function forces the completion of one pending asynchronous wait - * operation against the timer. Handlers are cancelled in FIFO order. The - * handler for the cancelled operation will be invoked with the - * asio::error::operation_aborted error code. - * - * Cancelling the timer does not change the expiry time. - * - * @return The number of asynchronous operations that were cancelled. That is, - * either 0 or 1. 
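A minimal sketch of the cancel() semantics described above, assuming an io_service is in scope and Boost.Date_Time is available (per the ASIO_HAS_BOOST_DATE_TIME guard on this file):

  asio::deadline_timer timer(io_service);
  timer.expires_from_now(boost::posix_time::seconds(30));
  timer.async_wait(
      [](const asio::error_code& e)
      {
        if (e == asio::error::operation_aborted)
          ; // wait was cancelled
        else
          ; // timer expired (or another error occurred)
      });

  // Completes the pending wait with asio::error::operation_aborted.
  // Returns 0 if the timer had already fired, in which case the
  // handler can no longer be cancelled.
  std::size_t cancelled = timer.cancel();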
- * - * @throws asio::system_error Thrown on failure. - * - * @note If the timer has already expired when cancel_one() is called, then - * the handlers for asynchronous wait operations will: - * - * @li have already been invoked; or - * - * @li have been queued for invocation in the near future. - * - * These handlers can no longer be cancelled, and therefore are passed an - * error code that indicates the successful completion of the wait operation. - */ - std::size_t cancel_one() - { - asio::error_code ec; - std::size_t s = this->service.cancel_one(this->implementation, ec); - asio::detail::throw_error(ec, "cancel_one"); - return s; - } - - /// Cancels one asynchronous operation that is waiting on the timer. - /** - * This function forces the completion of one pending asynchronous wait - * operation against the timer. Handlers are cancelled in FIFO order. The - * handler for the cancelled operation will be invoked with the - * asio::error::operation_aborted error code. - * - * Cancelling the timer does not change the expiry time. - * - * @param ec Set to indicate what error occurred, if any. - * - * @return The number of asynchronous operations that were cancelled. That is, - * either 0 or 1. - * - * @note If the timer has already expired when cancel_one() is called, then - * the handlers for asynchronous wait operations will: - * - * @li have already been invoked; or - * - * @li have been queued for invocation in the near future. - * - * These handlers can no longer be cancelled, and therefore are passed an - * error code that indicates the successful completion of the wait operation. - */ - std::size_t cancel_one(asio::error_code& ec) - { - return this->service.cancel_one(this->implementation, ec); - } - - /// Get the timer's expiry time as an absolute time. - /** - * This function may be used to obtain the timer's current expiry time. - * Whether the timer has expired or not does not affect this value. - */ - time_type expires_at() const - { - return this->service.expires_at(this->implementation); - } - - /// Set the timer's expiry time as an absolute time. - /** - * This function sets the expiry time. Any pending asynchronous wait - * operations will be cancelled. The handler for each cancelled operation will - * be invoked with the asio::error::operation_aborted error code. - * - * @param expiry_time The expiry time to be used for the timer. - * - * @return The number of asynchronous operations that were cancelled. - * - * @throws asio::system_error Thrown on failure. - * - * @note If the timer has already expired when expires_at() is called, then - * the handlers for asynchronous wait operations will: - * - * @li have already been invoked; or - * - * @li have been queued for invocation in the near future. - * - * These handlers can no longer be cancelled, and therefore are passed an - * error code that indicates the successful completion of the wait operation. - */ - std::size_t expires_at(const time_type& expiry_time) - { - asio::error_code ec; - std::size_t s = this->service.expires_at( - this->implementation, expiry_time, ec); - asio::detail::throw_error(ec, "expires_at"); - return s; - } - - /// Set the timer's expiry time as an absolute time. - /** - * This function sets the expiry time. Any pending asynchronous wait - * operations will be cancelled. The handler for each cancelled operation will - * be invoked with the asio::error::operation_aborted error code. - * - * @param expiry_time The expiry time to be used for the timer. 
- * - * @param ec Set to indicate what error occurred, if any. - * - * @return The number of asynchronous operations that were cancelled. - * - * @note If the timer has already expired when expires_at() is called, then - * the handlers for asynchronous wait operations will: - * - * @li have already been invoked; or - * - * @li have been queued for invocation in the near future. - * - * These handlers can no longer be cancelled, and therefore are passed an - * error code that indicates the successful completion of the wait operation. - */ - std::size_t expires_at(const time_type& expiry_time, - asio::error_code& ec) - { - return this->service.expires_at(this->implementation, expiry_time, ec); - } - - /// Get the timer's expiry time relative to now. - /** - * This function may be used to obtain the timer's current expiry time. - * Whether the timer has expired or not does not affect this value. - */ - duration_type expires_from_now() const - { - return this->service.expires_from_now(this->implementation); - } - - /// Set the timer's expiry time relative to now. - /** - * This function sets the expiry time. Any pending asynchronous wait - * operations will be cancelled. The handler for each cancelled operation will - * be invoked with the asio::error::operation_aborted error code. - * - * @param expiry_time The expiry time to be used for the timer. - * - * @return The number of asynchronous operations that were cancelled. - * - * @throws asio::system_error Thrown on failure. - * - * @note If the timer has already expired when expires_from_now() is called, - * then the handlers for asynchronous wait operations will: - * - * @li have already been invoked; or - * - * @li have been queued for invocation in the near future. - * - * These handlers can no longer be cancelled, and therefore are passed an - * error code that indicates the successful completion of the wait operation. - */ - std::size_t expires_from_now(const duration_type& expiry_time) - { - asio::error_code ec; - std::size_t s = this->service.expires_from_now( - this->implementation, expiry_time, ec); - asio::detail::throw_error(ec, "expires_from_now"); - return s; - } - - /// Set the timer's expiry time relative to now. - /** - * This function sets the expiry time. Any pending asynchronous wait - * operations will be cancelled. The handler for each cancelled operation will - * be invoked with the asio::error::operation_aborted error code. - * - * @param expiry_time The expiry time to be used for the timer. - * - * @param ec Set to indicate what error occurred, if any. - * - * @return The number of asynchronous operations that were cancelled. - * - * @note If the timer has already expired when expires_from_now() is called, - * then the handlers for asynchronous wait operations will: - * - * @li have already been invoked; or - * - * @li have been queued for invocation in the near future. - * - * These handlers can no longer be cancelled, and therefore are passed an - * error code that indicates the successful completion of the wait operation. - */ - std::size_t expires_from_now(const duration_type& expiry_time, - asio::error_code& ec) - { - return this->service.expires_from_now( - this->implementation, expiry_time, ec); - } - - /// Perform a blocking wait on the timer. - /** - * This function is used to wait for the timer to expire. This function - * blocks and does not return until the timer has expired. - * - * @throws asio::system_error Thrown on failure. 
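Combining the expires_at() getter and setter gives the standard drift-free periodic timer: each deadline is computed from the previous one rather than from the current time. A minimal sketch, assuming <functional> and the same Boost.Date_Time setup:

  // Reschedules itself once per second without accumulating drift.
  void tick(asio::deadline_timer& timer, const asio::error_code& e)
  {
    if (e)
      return; // cancelled or failed

    // ... periodic work here ...

    timer.expires_at(timer.expires_at() + boost::posix_time::seconds(1));
    timer.async_wait(std::bind(&tick, std::ref(timer), std::placeholders::_1));
  }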
- */ - void wait() - { - asio::error_code ec; - this->service.wait(this->implementation, ec); - asio::detail::throw_error(ec, "wait"); - } - - /// Perform a blocking wait on the timer. - /** - * This function is used to wait for the timer to expire. This function - * blocks and does not return until the timer has expired. - * - * @param ec Set to indicate what error occurred, if any. - */ - void wait(asio::error_code& ec) - { - this->service.wait(this->implementation, ec); - } - - /// Start an asynchronous wait on the timer. - /** - * This function may be used to initiate an asynchronous wait against the - * timer. It always returns immediately. - * - * For each call to async_wait(), the supplied handler will be called exactly - * once. The handler will be called when: - * - * @li The timer has expired. - * - * @li The timer was cancelled, in which case the handler is passed the error - * code asio::error::operation_aborted. - * - * @param handler The handler to be called when the timer expires. Copies - * will be made of the handler as required. The function signature of the - * handler must be: - * @code void handler( - * const asio::error_code& error // Result of operation. - * ); @endcode - * Regardless of whether the asynchronous operation completes immediately or - * not, the handler will not be invoked from within this function. Invocation - * of the handler will be performed in a manner equivalent to using - * asio::io_service::post(). - */ - template - ASIO_INITFN_RESULT_TYPE(WaitHandler, - void (asio::error_code)) - async_wait(ASIO_MOVE_ARG(WaitHandler) handler) - { - // If you get an error on the following line it means that your handler does - // not meet the documented type requirements for a WaitHandler. - ASIO_WAIT_HANDLER_CHECK(WaitHandler, handler) type_check; - - return this->service.async_wait(this->implementation, - ASIO_MOVE_CAST(WaitHandler)(handler)); - } -}; - -} // namespace asio - -#include "asio/detail/pop_options.hpp" - -#endif // defined(ASIO_HAS_BOOST_DATE_TIME) - // || defined(ASIO_CPP11_DATE_TIME) - // || defined(GENERATING_DOCUMENTATION) - -#endif // ASIO_BASIC_DEADLINE_TIMER_HPP diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_io_object.hpp b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_io_object.hpp deleted file mode 100644 index 6154d92f3616d..0000000000000 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_io_object.hpp +++ /dev/null @@ -1,240 +0,0 @@ -// -// basic_io_object.hpp -// ~~~~~~~~~~~~~~~~~~~ -// -// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com) -// -// Distributed under the Boost Software License, Version 1.0. (See accompanying -// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -// - -#ifndef ASIO_BASIC_IO_OBJECT_HPP -#define ASIO_BASIC_IO_OBJECT_HPP - -#if defined(_MSC_VER) && (_MSC_VER >= 1200) -# pragma once -#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) - -#include "asio/detail/config.hpp" -#include "asio/io_service.hpp" - -#include "asio/detail/push_options.hpp" - -namespace asio { - -#if defined(ASIO_HAS_MOVE) -namespace detail -{ - // Type trait used to determine whether a service supports move. 
- template - class service_has_move - { - private: - typedef IoObjectService service_type; - typedef typename service_type::implementation_type implementation_type; - - template - static auto eval(T* t, U* u) -> decltype(t->move_construct(*u, *u), char()); - static char (&eval(...))[2]; - - public: - static const bool value = - sizeof(service_has_move::eval( - static_cast(0), - static_cast(0))) == 1; - }; -} -#endif // defined(ASIO_HAS_MOVE) - -/// Base class for all I/O objects. -/** - * @note All I/O objects are non-copyable. However, when using C++0x, certain - * I/O objects do support move construction and move assignment. - */ -#if !defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) -template -#else -template ::value> -#endif -class basic_io_object -{ -public: - /// The type of the service that will be used to provide I/O operations. - typedef IoObjectService service_type; - - /// The underlying implementation type of I/O object. - typedef typename service_type::implementation_type implementation_type; - - /// Get the io_service associated with the object. - /** - * This function may be used to obtain the io_service object that the I/O - * object uses to dispatch handlers for asynchronous operations. - * - * @return A reference to the io_service object that the I/O object will use - * to dispatch handlers. Ownership is not transferred to the caller. - */ - asio::io_service& get_io_service() - { - return service.get_io_service(); - } - -protected: - /// Construct a basic_io_object. - /** - * Performs: - * @code get_service().construct(get_implementation()); @endcode - */ - explicit basic_io_object(asio::io_service& io_service) - : service(asio::use_service(io_service)) - { - service.construct(implementation); - } - -#if defined(GENERATING_DOCUMENTATION) - /// Move-construct a basic_io_object. - /** - * Performs: - * @code get_service().move_construct( - * get_implementation(), other.get_implementation()); @endcode - * - * @note Available only for services that support movability, - */ - basic_io_object(basic_io_object&& other); - - /// Move-assign a basic_io_object. - /** - * Performs: - * @code get_service().move_assign(get_implementation(), - * other.get_service(), other.get_implementation()); @endcode - * - * @note Available only for services that support movability, - */ - basic_io_object& operator=(basic_io_object&& other); -#endif // defined(GENERATING_DOCUMENTATION) - - /// Protected destructor to prevent deletion through this type. - /** - * Performs: - * @code get_service().destroy(get_implementation()); @endcode - */ - ~basic_io_object() - { - service.destroy(implementation); - } - - /// Get the service associated with the I/O object. - service_type& get_service() - { - return service; - } - - /// Get the service associated with the I/O object. - const service_type& get_service() const - { - return service; - } - - /// (Deprecated: Use get_service().) The service associated with the I/O - /// object. - /** - * @note Available only for services that do not support movability. - */ - service_type& service; - - /// Get the underlying implementation of the I/O object. - implementation_type& get_implementation() - { - return implementation; - } - - /// Get the underlying implementation of the I/O object. - const implementation_type& get_implementation() const - { - return implementation; - } - - /// (Deprecated: Use get_implementation().) The underlying implementation of - /// the I/O object. 
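The service_has_move trait above is an instance of C++11 expression SFINAE: the templated eval() overload is viable only when t->move_construct(*u, *u) is well formed, and the two overloads are told apart by the size of their return types. A standalone sketch of the same idiom, with illustrative service names:

  struct movable_service
  {
    struct implementation_type {};
    void move_construct(implementation_type&, implementation_type&) {}
  };

  struct fixed_service
  {
    struct implementation_type {};
  };

  template <typename Service>
  class supports_move
  {
    typedef typename Service::implementation_type impl_type;

    // Viable only if move_construct(impl, impl) compiles; sizeof == 1.
    template <typename T, typename U>
    static auto eval(T* t, U* u) -> decltype(t->move_construct(*u, *u), char());

    // Fallback; sizeof == 2.
    static char (&eval(...))[2];

  public:
    static const bool value =
        sizeof(eval(static_cast<Service*>(0), static_cast<impl_type*>(0))) == 1;
  };

  static_assert(supports_move<movable_service>::value, "");
  static_assert(!supports_move<fixed_service>::value, "");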
- implementation_type implementation; - -private: - basic_io_object(const basic_io_object&); - basic_io_object& operator=(const basic_io_object&); -}; - -#if defined(ASIO_HAS_MOVE) -// Specialisation for movable objects. -template -class basic_io_object -{ -public: - typedef IoObjectService service_type; - typedef typename service_type::implementation_type implementation_type; - - asio::io_service& get_io_service() - { - return service_->get_io_service(); - } - -protected: - explicit basic_io_object(asio::io_service& io_service) - : service_(&asio::use_service(io_service)) - { - service_->construct(implementation); - } - - basic_io_object(basic_io_object&& other) - : service_(&other.get_service()) - { - service_->move_construct(implementation, other.implementation); - } - - ~basic_io_object() - { - service_->destroy(implementation); - } - - basic_io_object& operator=(basic_io_object&& other) - { - service_->move_assign(implementation, - *other.service_, other.implementation); - service_ = other.service_; - return *this; - } - - service_type& get_service() - { - return *service_; - } - - const service_type& get_service() const - { - return *service_; - } - - implementation_type& get_implementation() - { - return implementation; - } - - const implementation_type& get_implementation() const - { - return implementation; - } - - implementation_type implementation; - -private: - basic_io_object(const basic_io_object&); - void operator=(const basic_io_object&); - - IoObjectService* service_; -}; -#endif // defined(ASIO_HAS_MOVE) - -} // namespace asio - -#include "asio/detail/pop_options.hpp" - -#endif // ASIO_BASIC_IO_OBJECT_HPP diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_raw_socket.hpp b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_raw_socket.hpp deleted file mode 100644 index b0f3f186b2b57..0000000000000 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_raw_socket.hpp +++ /dev/null @@ -1,940 +0,0 @@ -// -// basic_raw_socket.hpp -// ~~~~~~~~~~~~~~~~~~~~ -// -// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com) -// -// Distributed under the Boost Software License, Version 1.0. (See accompanying -// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -// - -#ifndef ASIO_BASIC_RAW_SOCKET_HPP -#define ASIO_BASIC_RAW_SOCKET_HPP - -#if defined(_MSC_VER) && (_MSC_VER >= 1200) -# pragma once -#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) - -#include "asio/detail/config.hpp" -#include -#include "asio/basic_socket.hpp" -#include "asio/detail/handler_type_requirements.hpp" -#include "asio/detail/throw_error.hpp" -#include "asio/detail/type_traits.hpp" -#include "asio/error.hpp" -#include "asio/raw_socket_service.hpp" - -#include "asio/detail/push_options.hpp" - -namespace asio { - -/// Provides raw-oriented socket functionality. -/** - * The basic_raw_socket class template provides asynchronous and blocking - * raw-oriented socket functionality. - * - * @par Thread Safety - * @e Distinct @e objects: Safe.@n - * @e Shared @e objects: Unsafe. - */ -template > -class basic_raw_socket - : public basic_socket -{ -public: - /// (Deprecated: Use native_handle_type.) The native representation of a - /// socket. - typedef typename RawSocketService::native_handle_type native_type; - - /// The native representation of a socket. 
- typedef typename RawSocketService::native_handle_type native_handle_type; - - /// The protocol type. - typedef Protocol protocol_type; - - /// The endpoint type. - typedef typename Protocol::endpoint endpoint_type; - - /// Construct a basic_raw_socket without opening it. - /** - * This constructor creates a raw socket without opening it. The open() - * function must be called before data can be sent or received on the socket. - * - * @param io_service The io_service object that the raw socket will use - * to dispatch handlers for any asynchronous operations performed on the - * socket. - */ - explicit basic_raw_socket(asio::io_service& io_service) - : basic_socket(io_service) - { - } - - /// Construct and open a basic_raw_socket. - /** - * This constructor creates and opens a raw socket. - * - * @param io_service The io_service object that the raw socket will use - * to dispatch handlers for any asynchronous operations performed on the - * socket. - * - * @param protocol An object specifying protocol parameters to be used. - * - * @throws asio::system_error Thrown on failure. - */ - basic_raw_socket(asio::io_service& io_service, - const protocol_type& protocol) - : basic_socket(io_service, protocol) - { - } - - /// Construct a basic_raw_socket, opening it and binding it to the given - /// local endpoint. - /** - * This constructor creates a raw socket and automatically opens it bound - * to the specified endpoint on the local machine. The protocol used is the - * protocol associated with the given endpoint. - * - * @param io_service The io_service object that the raw socket will use - * to dispatch handlers for any asynchronous operations performed on the - * socket. - * - * @param endpoint An endpoint on the local machine to which the raw - * socket will be bound. - * - * @throws asio::system_error Thrown on failure. - */ - basic_raw_socket(asio::io_service& io_service, - const endpoint_type& endpoint) - : basic_socket(io_service, endpoint) - { - } - - /// Construct a basic_raw_socket on an existing native socket. - /** - * This constructor creates a raw socket object to hold an existing - * native socket. - * - * @param io_service The io_service object that the raw socket will use - * to dispatch handlers for any asynchronous operations performed on the - * socket. - * - * @param protocol An object specifying protocol parameters to be used. - * - * @param native_socket The new underlying socket implementation. - * - * @throws asio::system_error Thrown on failure. - */ - basic_raw_socket(asio::io_service& io_service, - const protocol_type& protocol, const native_handle_type& native_socket) - : basic_socket( - io_service, protocol, native_socket) - { - } - -#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) - /// Move-construct a basic_raw_socket from another. - /** - * This constructor moves a raw socket from one object to another. - * - * @param other The other basic_raw_socket object from which the move - * will occur. - * - * @note Following the move, the moved-from object is in the same state as if - * constructed using the @c basic_raw_socket(io_service&) constructor. - */ - basic_raw_socket(basic_raw_socket&& other) - : basic_socket( - ASIO_MOVE_CAST(basic_raw_socket)(other)) - { - } - - /// Move-assign a basic_raw_socket from another. - /** - * This assignment operator moves a raw socket from one object to another. - * - * @param other The other basic_raw_socket object from which the move - * will occur. 
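A minimal sketch of the construct-and-open forms above, using asio's ICMP protocol class; note that opening a raw socket typically requires elevated privileges (for example root, or CAP_NET_RAW on Linux):

  #include <asio.hpp>
  #include <iostream>

  int main()
  {
    asio::io_service io_service;

    // Construct-and-open in one step (throws on failure).
    asio::ip::icmp::socket socket1(io_service, asio::ip::icmp::v4());

    // Two-step form with explicit error handling.
    asio::ip::icmp::socket socket2(io_service);
    asio::error_code ec;
    socket2.open(asio::ip::icmp::v4(), ec);
    if (ec)
      std::cerr << "cannot open raw socket: " << ec.message() << '\n';
  }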
- *
- * @note Following the move, the moved-from object is in the same state as if
- * constructed using the @c basic_raw_socket(io_service&) constructor.
- */
- basic_raw_socket& operator=(basic_raw_socket&& other)
- {
- basic_socket<Protocol, RawSocketService>::operator=(
- ASIO_MOVE_CAST(basic_raw_socket)(other));
- return *this;
- }
-
- /// Move-construct a basic_raw_socket from a socket of another protocol type.
- /**
- * This constructor moves a raw socket from one object to another.
- *
- * @param other The other basic_raw_socket object from which the move will
- * occur.
- *
- * @note Following the move, the moved-from object is in the same state as if
- * constructed using the @c basic_raw_socket(io_service&) constructor.
- */
- template <typename Protocol1, typename RawSocketService1>
- basic_raw_socket(basic_raw_socket<Protocol1, RawSocketService1>&& other,
- typename enable_if<is_convertible<Protocol1, Protocol>::value>::type* = 0)
- : basic_socket<Protocol, RawSocketService>(
- ASIO_MOVE_CAST2(basic_raw_socket<
- Protocol1, RawSocketService1>)(other))
- {
- }
-
- /// Move-assign a basic_raw_socket from a socket of another protocol type.
- /**
- * This assignment operator moves a raw socket from one object to another.
- *
- * @param other The other basic_raw_socket object from which the move
- * will occur.
- *
- * @note Following the move, the moved-from object is in the same state as if
- * constructed using the @c basic_raw_socket(io_service&) constructor.
- */
- template <typename Protocol1, typename RawSocketService1>
- typename enable_if<is_convertible<Protocol1, Protocol>::value,
- basic_raw_socket>::type& operator=(
- basic_raw_socket<Protocol1, RawSocketService1>&& other)
- {
- basic_socket<Protocol, RawSocketService>::operator=(
- ASIO_MOVE_CAST2(basic_raw_socket<
- Protocol1, RawSocketService1>)(other));
- return *this;
- }
-#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)
-
- /// Send some data on a connected socket.
- /**
- * This function is used to send data on the raw socket. The function call
- * will block until the data has been sent successfully or an error occurs.
- *
- * @param buffers One or more data buffers to be sent on the socket.
- *
- * @returns The number of bytes sent.
- *
- * @throws asio::system_error Thrown on failure.
- *
- * @note The send operation can only be used with a connected socket. Use
- * the send_to function to send data on an unconnected raw socket.
- *
- * @par Example
- * To send a single data buffer use the @ref buffer function as follows:
- * @code socket.send(asio::buffer(data, size)); @endcode
- * See the @ref buffer documentation for information on sending multiple
- * buffers in one go, and how to use it with arrays, boost::array or
- * std::vector.
- */
- template <typename ConstBufferSequence>
- std::size_t send(const ConstBufferSequence& buffers)
- {
- asio::error_code ec;
- std::size_t s = this->get_service().send(
- this->get_implementation(), buffers, 0, ec);
- asio::detail::throw_error(ec, "send");
- return s;
- }
-
- /// Send some data on a connected socket.
- /**
- * This function is used to send data on the raw socket. The function call
- * will block until the data has been sent successfully or an error occurs.
- *
- * @param buffers One or more data buffers to be sent on the socket.
- *
- * @param flags Flags specifying how the send call is to be made.
- *
- * @returns The number of bytes sent.
- *
- * @throws asio::system_error Thrown on failure.
- *
- * @note The send operation can only be used with a connected socket. Use
- * the send_to function to send data on an unconnected raw socket.
- */
- template <typename ConstBufferSequence>
- std::size_t send(const ConstBufferSequence& buffers,
- socket_base::message_flags flags)
- {
- asio::error_code ec;
- std::size_t s = this->get_service().send(
- this->get_implementation(), buffers, flags, ec);
- asio::detail::throw_error(ec, "send");
- return s;
- }
-
- /// Send some data on a connected socket.
- /**
- * This function is used to send data on the raw socket. The function call
- * will block until the data has been sent successfully or an error occurs.
- *
- * @param buffers One or more data buffers to be sent on the socket.
- *
- * @param flags Flags specifying how the send call is to be made.
- *
- * @param ec Set to indicate what error occurred, if any.
- *
- * @returns The number of bytes sent.
- *
- * @note The send operation can only be used with a connected socket. Use
- * the send_to function to send data on an unconnected raw socket.
- */
- template <typename ConstBufferSequence>
- std::size_t send(const ConstBufferSequence& buffers,
- socket_base::message_flags flags, asio::error_code& ec)
- {
- return this->get_service().send(
- this->get_implementation(), buffers, flags, ec);
- }
-
- /// Start an asynchronous send on a connected socket.
- /**
- * This function is used to asynchronously send data on the raw socket. The
- * function call always returns immediately.
- *
- * @param buffers One or more data buffers to be sent on the socket. Although
- * the buffers object may be copied as necessary, ownership of the underlying
- * memory blocks is retained by the caller, which must guarantee that they
- * remain valid until the handler is called.
- *
- * @param handler The handler to be called when the send operation completes.
- * Copies will be made of the handler as required. The function signature of
- * the handler must be:
- * @code void handler(
- * const asio::error_code& error, // Result of operation.
- * std::size_t bytes_transferred // Number of bytes sent.
- * ); @endcode
- * Regardless of whether the asynchronous operation completes immediately or
- * not, the handler will not be invoked from within this function. Invocation
- * of the handler will be performed in a manner equivalent to using
- * asio::io_service::post().
- *
- * @note The async_send operation can only be used with a connected socket.
- * Use the async_send_to function to send data on an unconnected raw
- * socket.
- *
- * @par Example
- * To send a single data buffer use the @ref buffer function as follows:
- * @code
- * socket.async_send(asio::buffer(data, size), handler);
- * @endcode
- * See the @ref buffer documentation for information on sending multiple
- * buffers in one go, and how to use it with arrays, boost::array or
- * std::vector.
- */
- template <typename ConstBufferSequence, typename WriteHandler>
- ASIO_INITFN_RESULT_TYPE(WriteHandler,
- void (asio::error_code, std::size_t))
- async_send(const ConstBufferSequence& buffers,
- ASIO_MOVE_ARG(WriteHandler) handler)
- {
- // If you get an error on the following line it means that your handler does
- // not meet the documented type requirements for a WriteHandler.
- ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check;
-
- return this->get_service().async_send(this->get_implementation(),
- buffers, 0, ASIO_MOVE_CAST(WriteHandler)(handler));
- }
-
- /// Start an asynchronous send on a connected socket.
- /**
- * This function is used to asynchronously send data on the raw socket. The
- * function call always returns immediately.
- *
- * @param buffers One or more data buffers to be sent on the socket.
Although - * the buffers object may be copied as necessary, ownership of the underlying - * memory blocks is retained by the caller, which must guarantee that they - * remain valid until the handler is called. - * - * @param flags Flags specifying how the send call is to be made. - * - * @param handler The handler to be called when the send operation completes. - * Copies will be made of the handler as required. The function signature of - * the handler must be: - * @code void handler( - * const asio::error_code& error, // Result of operation. - * std::size_t bytes_transferred // Number of bytes sent. - * ); @endcode - * Regardless of whether the asynchronous operation completes immediately or - * not, the handler will not be invoked from within this function. Invocation - * of the handler will be performed in a manner equivalent to using - * asio::io_service::post(). - * - * @note The async_send operation can only be used with a connected socket. - * Use the async_send_to function to send data on an unconnected raw - * socket. - */ - template - ASIO_INITFN_RESULT_TYPE(WriteHandler, - void (asio::error_code, std::size_t)) - async_send(const ConstBufferSequence& buffers, - socket_base::message_flags flags, - ASIO_MOVE_ARG(WriteHandler) handler) - { - // If you get an error on the following line it means that your handler does - // not meet the documented type requirements for a WriteHandler. - ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; - - return this->get_service().async_send(this->get_implementation(), - buffers, flags, ASIO_MOVE_CAST(WriteHandler)(handler)); - } - - /// Send raw data to the specified endpoint. - /** - * This function is used to send raw data to the specified remote endpoint. - * The function call will block until the data has been sent successfully or - * an error occurs. - * - * @param buffers One or more data buffers to be sent to the remote endpoint. - * - * @param destination The remote endpoint to which the data will be sent. - * - * @returns The number of bytes sent. - * - * @throws asio::system_error Thrown on failure. - * - * @par Example - * To send a single data buffer use the @ref buffer function as follows: - * @code - * asio::ip::udp::endpoint destination( - * asio::ip::address::from_string("1.2.3.4"), 12345); - * socket.send_to(asio::buffer(data, size), destination); - * @endcode - * See the @ref buffer documentation for information on sending multiple - * buffers in one go, and how to use it with arrays, boost::array or - * std::vector. - */ - template - std::size_t send_to(const ConstBufferSequence& buffers, - const endpoint_type& destination) - { - asio::error_code ec; - std::size_t s = this->get_service().send_to( - this->get_implementation(), buffers, destination, 0, ec); - asio::detail::throw_error(ec, "send_to"); - return s; - } - - /// Send raw data to the specified endpoint. - /** - * This function is used to send raw data to the specified remote endpoint. - * The function call will block until the data has been sent successfully or - * an error occurs. - * - * @param buffers One or more data buffers to be sent to the remote endpoint. - * - * @param destination The remote endpoint to which the data will be sent. - * - * @param flags Flags specifying how the send call is to be made. - * - * @returns The number of bytes sent. - * - * @throws asio::system_error Thrown on failure. 
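A sketch of send_to on an ICMP raw socket. The make_echo_request() helper is hypothetical (raw sockets require the caller to build the full ICMP header, including the checksum), and the address is from the documentation range:

  // 'make_echo_request' is a hypothetical helper returning a fully
  // formed ICMP echo request (type, code, checksum, id, sequence, body).
  std::vector<unsigned char> echo_request = make_echo_request();

  asio::ip::icmp::endpoint destination(
      asio::ip::address::from_string("192.0.2.1"), 0); // port unused for ICMP

  asio::error_code ec;
  socket.send_to(asio::buffer(echo_request), destination, 0, ec);
  if (ec)
    std::cerr << "send_to failed: " << ec.message() << '\n';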
- */ - template - std::size_t send_to(const ConstBufferSequence& buffers, - const endpoint_type& destination, socket_base::message_flags flags) - { - asio::error_code ec; - std::size_t s = this->get_service().send_to( - this->get_implementation(), buffers, destination, flags, ec); - asio::detail::throw_error(ec, "send_to"); - return s; - } - - /// Send raw data to the specified endpoint. - /** - * This function is used to send raw data to the specified remote endpoint. - * The function call will block until the data has been sent successfully or - * an error occurs. - * - * @param buffers One or more data buffers to be sent to the remote endpoint. - * - * @param destination The remote endpoint to which the data will be sent. - * - * @param flags Flags specifying how the send call is to be made. - * - * @param ec Set to indicate what error occurred, if any. - * - * @returns The number of bytes sent. - */ - template - std::size_t send_to(const ConstBufferSequence& buffers, - const endpoint_type& destination, socket_base::message_flags flags, - asio::error_code& ec) - { - return this->get_service().send_to(this->get_implementation(), - buffers, destination, flags, ec); - } - - /// Start an asynchronous send. - /** - * This function is used to asynchronously send raw data to the specified - * remote endpoint. The function call always returns immediately. - * - * @param buffers One or more data buffers to be sent to the remote endpoint. - * Although the buffers object may be copied as necessary, ownership of the - * underlying memory blocks is retained by the caller, which must guarantee - * that they remain valid until the handler is called. - * - * @param destination The remote endpoint to which the data will be sent. - * Copies will be made of the endpoint as required. - * - * @param handler The handler to be called when the send operation completes. - * Copies will be made of the handler as required. The function signature of - * the handler must be: - * @code void handler( - * const asio::error_code& error, // Result of operation. - * std::size_t bytes_transferred // Number of bytes sent. - * ); @endcode - * Regardless of whether the asynchronous operation completes immediately or - * not, the handler will not be invoked from within this function. Invocation - * of the handler will be performed in a manner equivalent to using - * asio::io_service::post(). - * - * @par Example - * To send a single data buffer use the @ref buffer function as follows: - * @code - * asio::ip::udp::endpoint destination( - * asio::ip::address::from_string("1.2.3.4"), 12345); - * socket.async_send_to( - * asio::buffer(data, size), destination, handler); - * @endcode - * See the @ref buffer documentation for information on sending multiple - * buffers in one go, and how to use it with arrays, boost::array or - * std::vector. - */ - template - ASIO_INITFN_RESULT_TYPE(WriteHandler, - void (asio::error_code, std::size_t)) - async_send_to(const ConstBufferSequence& buffers, - const endpoint_type& destination, - ASIO_MOVE_ARG(WriteHandler) handler) - { - // If you get an error on the following line it means that your handler does - // not meet the documented type requirements for a WriteHandler. - ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; - - return this->get_service().async_send_to(this->get_implementation(), - buffers, destination, 0, ASIO_MOVE_CAST(WriteHandler)(handler)); - } - - /// Start an asynchronous send. 
- /** - * This function is used to asynchronously send raw data to the specified - * remote endpoint. The function call always returns immediately. - * - * @param buffers One or more data buffers to be sent to the remote endpoint. - * Although the buffers object may be copied as necessary, ownership of the - * underlying memory blocks is retained by the caller, which must guarantee - * that they remain valid until the handler is called. - * - * @param flags Flags specifying how the send call is to be made. - * - * @param destination The remote endpoint to which the data will be sent. - * Copies will be made of the endpoint as required. - * - * @param handler The handler to be called when the send operation completes. - * Copies will be made of the handler as required. The function signature of - * the handler must be: - * @code void handler( - * const asio::error_code& error, // Result of operation. - * std::size_t bytes_transferred // Number of bytes sent. - * ); @endcode - * Regardless of whether the asynchronous operation completes immediately or - * not, the handler will not be invoked from within this function. Invocation - * of the handler will be performed in a manner equivalent to using - * asio::io_service::post(). - */ - template - ASIO_INITFN_RESULT_TYPE(WriteHandler, - void (asio::error_code, std::size_t)) - async_send_to(const ConstBufferSequence& buffers, - const endpoint_type& destination, socket_base::message_flags flags, - ASIO_MOVE_ARG(WriteHandler) handler) - { - // If you get an error on the following line it means that your handler does - // not meet the documented type requirements for a WriteHandler. - ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; - - return this->get_service().async_send_to( - this->get_implementation(), buffers, destination, flags, - ASIO_MOVE_CAST(WriteHandler)(handler)); - } - - /// Receive some data on a connected socket. - /** - * This function is used to receive data on the raw socket. The function - * call will block until data has been received successfully or an error - * occurs. - * - * @param buffers One or more buffers into which the data will be received. - * - * @returns The number of bytes received. - * - * @throws asio::system_error Thrown on failure. - * - * @note The receive operation can only be used with a connected socket. Use - * the receive_from function to receive data on an unconnected raw - * socket. - * - * @par Example - * To receive into a single data buffer use the @ref buffer function as - * follows: - * @code socket.receive(asio::buffer(data, size)); @endcode - * See the @ref buffer documentation for information on receiving into - * multiple buffers in one go, and how to use it with arrays, boost::array or - * std::vector. - */ - template - std::size_t receive(const MutableBufferSequence& buffers) - { - asio::error_code ec; - std::size_t s = this->get_service().receive( - this->get_implementation(), buffers, 0, ec); - asio::detail::throw_error(ec, "receive"); - return s; - } - - /// Receive some data on a connected socket. - /** - * This function is used to receive data on the raw socket. The function - * call will block until data has been received successfully or an error - * occurs. - * - * @param buffers One or more buffers into which the data will be received. - * - * @param flags Flags specifying how the receive call is to be made. - * - * @returns The number of bytes received. - * - * @throws asio::system_error Thrown on failure. 
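- *
- * @par Example (editorial sketch)
- * A hedged sketch of a blocking receive on a connected socket; the
- * loopback endpoint, port number and buffer size are illustrative
- * assumptions, not part of the original documentation:
- * @code
- * #include <asio.hpp>
- *
- * int main()
- * {
- *   asio::io_service io_service;
- *   asio::ip::udp::socket socket(io_service, asio::ip::udp::v4());
- *   socket.connect(asio::ip::udp::endpoint(
- *       asio::ip::address::from_string("127.0.0.1"), 12345));  // assumed
- *   char data[128];
- *   // Blocks until a datagram arrives from the connected peer.
- *   std::size_t n = socket.receive(asio::buffer(data), 0);
- *   (void)n;
- * }
- * @endcode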
- * - * @note The receive operation can only be used with a connected socket. Use - * the receive_from function to receive data on an unconnected raw - * socket. - */ - template - std::size_t receive(const MutableBufferSequence& buffers, - socket_base::message_flags flags) - { - asio::error_code ec; - std::size_t s = this->get_service().receive( - this->get_implementation(), buffers, flags, ec); - asio::detail::throw_error(ec, "receive"); - return s; - } - - /// Receive some data on a connected socket. - /** - * This function is used to receive data on the raw socket. The function - * call will block until data has been received successfully or an error - * occurs. - * - * @param buffers One or more buffers into which the data will be received. - * - * @param flags Flags specifying how the receive call is to be made. - * - * @param ec Set to indicate what error occurred, if any. - * - * @returns The number of bytes received. - * - * @note The receive operation can only be used with a connected socket. Use - * the receive_from function to receive data on an unconnected raw - * socket. - */ - template - std::size_t receive(const MutableBufferSequence& buffers, - socket_base::message_flags flags, asio::error_code& ec) - { - return this->get_service().receive( - this->get_implementation(), buffers, flags, ec); - } - - /// Start an asynchronous receive on a connected socket. - /** - * This function is used to asynchronously receive data from the raw - * socket. The function call always returns immediately. - * - * @param buffers One or more buffers into which the data will be received. - * Although the buffers object may be copied as necessary, ownership of the - * underlying memory blocks is retained by the caller, which must guarantee - * that they remain valid until the handler is called. - * - * @param handler The handler to be called when the receive operation - * completes. Copies will be made of the handler as required. The function - * signature of the handler must be: - * @code void handler( - * const asio::error_code& error, // Result of operation. - * std::size_t bytes_transferred // Number of bytes received. - * ); @endcode - * Regardless of whether the asynchronous operation completes immediately or - * not, the handler will not be invoked from within this function. Invocation - * of the handler will be performed in a manner equivalent to using - * asio::io_service::post(). - * - * @note The async_receive operation can only be used with a connected socket. - * Use the async_receive_from function to receive data on an unconnected - * raw socket. - * - * @par Example - * To receive into a single data buffer use the @ref buffer function as - * follows: - * @code - * socket.async_receive(asio::buffer(data, size), handler); - * @endcode - * See the @ref buffer documentation for information on receiving into - * multiple buffers in one go, and how to use it with arrays, boost::array or - * std::vector. - */ - template - ASIO_INITFN_RESULT_TYPE(ReadHandler, - void (asio::error_code, std::size_t)) - async_receive(const MutableBufferSequence& buffers, - ASIO_MOVE_ARG(ReadHandler) handler) - { - // If you get an error on the following line it means that your handler does - // not meet the documented type requirements for a ReadHandler. - ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; - - return this->get_service().async_receive(this->get_implementation(), - buffers, 0, ASIO_MOVE_CAST(ReadHandler)(handler)); - } - - /// Start an asynchronous receive on a connected socket. 
- /** - * This function is used to asynchronously receive data from the raw - * socket. The function call always returns immediately. - * - * @param buffers One or more buffers into which the data will be received. - * Although the buffers object may be copied as necessary, ownership of the - * underlying memory blocks is retained by the caller, which must guarantee - * that they remain valid until the handler is called. - * - * @param flags Flags specifying how the receive call is to be made. - * - * @param handler The handler to be called when the receive operation - * completes. Copies will be made of the handler as required. The function - * signature of the handler must be: - * @code void handler( - * const asio::error_code& error, // Result of operation. - * std::size_t bytes_transferred // Number of bytes received. - * ); @endcode - * Regardless of whether the asynchronous operation completes immediately or - * not, the handler will not be invoked from within this function. Invocation - * of the handler will be performed in a manner equivalent to using - * asio::io_service::post(). - * - * @note The async_receive operation can only be used with a connected socket. - * Use the async_receive_from function to receive data on an unconnected - * raw socket. - */ - template - ASIO_INITFN_RESULT_TYPE(ReadHandler, - void (asio::error_code, std::size_t)) - async_receive(const MutableBufferSequence& buffers, - socket_base::message_flags flags, - ASIO_MOVE_ARG(ReadHandler) handler) - { - // If you get an error on the following line it means that your handler does - // not meet the documented type requirements for a ReadHandler. - ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; - - return this->get_service().async_receive(this->get_implementation(), - buffers, flags, ASIO_MOVE_CAST(ReadHandler)(handler)); - } - - /// Receive raw data with the endpoint of the sender. - /** - * This function is used to receive raw data. The function call will block - * until data has been received successfully or an error occurs. - * - * @param buffers One or more buffers into which the data will be received. - * - * @param sender_endpoint An endpoint object that receives the endpoint of - * the remote sender of the data. - * - * @returns The number of bytes received. - * - * @throws asio::system_error Thrown on failure. - * - * @par Example - * To receive into a single data buffer use the @ref buffer function as - * follows: - * @code - * asio::ip::udp::endpoint sender_endpoint; - * socket.receive_from( - * asio::buffer(data, size), sender_endpoint); - * @endcode - * See the @ref buffer documentation for information on receiving into - * multiple buffers in one go, and how to use it with arrays, boost::array or - * std::vector. - */ - template - std::size_t receive_from(const MutableBufferSequence& buffers, - endpoint_type& sender_endpoint) - { - asio::error_code ec; - std::size_t s = this->get_service().receive_from( - this->get_implementation(), buffers, sender_endpoint, 0, ec); - asio::detail::throw_error(ec, "receive_from"); - return s; - } - - /// Receive raw data with the endpoint of the sender. - /** - * This function is used to receive raw data. The function call will block - * until data has been received successfully or an error occurs. - * - * @param buffers One or more buffers into which the data will be received. - * - * @param sender_endpoint An endpoint object that receives the endpoint of - * the remote sender of the data. 
- * - * @param flags Flags specifying how the receive call is to be made. - * - * @returns The number of bytes received. - * - * @throws asio::system_error Thrown on failure. - */ - template - std::size_t receive_from(const MutableBufferSequence& buffers, - endpoint_type& sender_endpoint, socket_base::message_flags flags) - { - asio::error_code ec; - std::size_t s = this->get_service().receive_from( - this->get_implementation(), buffers, sender_endpoint, flags, ec); - asio::detail::throw_error(ec, "receive_from"); - return s; - } - - /// Receive raw data with the endpoint of the sender. - /** - * This function is used to receive raw data. The function call will block - * until data has been received successfully or an error occurs. - * - * @param buffers One or more buffers into which the data will be received. - * - * @param sender_endpoint An endpoint object that receives the endpoint of - * the remote sender of the data. - * - * @param flags Flags specifying how the receive call is to be made. - * - * @param ec Set to indicate what error occurred, if any. - * - * @returns The number of bytes received. - */ - template - std::size_t receive_from(const MutableBufferSequence& buffers, - endpoint_type& sender_endpoint, socket_base::message_flags flags, - asio::error_code& ec) - { - return this->get_service().receive_from(this->get_implementation(), - buffers, sender_endpoint, flags, ec); - } - - /// Start an asynchronous receive. - /** - * This function is used to asynchronously receive raw data. The function - * call always returns immediately. - * - * @param buffers One or more buffers into which the data will be received. - * Although the buffers object may be copied as necessary, ownership of the - * underlying memory blocks is retained by the caller, which must guarantee - * that they remain valid until the handler is called. - * - * @param sender_endpoint An endpoint object that receives the endpoint of - * the remote sender of the data. Ownership of the sender_endpoint object - * is retained by the caller, which must guarantee that it is valid until the - * handler is called. - * - * @param handler The handler to be called when the receive operation - * completes. Copies will be made of the handler as required. The function - * signature of the handler must be: - * @code void handler( - * const asio::error_code& error, // Result of operation. - * std::size_t bytes_transferred // Number of bytes received. - * ); @endcode - * Regardless of whether the asynchronous operation completes immediately or - * not, the handler will not be invoked from within this function. Invocation - * of the handler will be performed in a manner equivalent to using - * asio::io_service::post(). - * - * @par Example - * To receive into a single data buffer use the @ref buffer function as - * follows: - * @code socket.async_receive_from( - * asio::buffer(data, size), sender_endpoint, handler); @endcode - * See the @ref buffer documentation for information on receiving into - * multiple buffers in one go, and how to use it with arrays, boost::array or - * std::vector. - */ - template - ASIO_INITFN_RESULT_TYPE(ReadHandler, - void (asio::error_code, std::size_t)) - async_receive_from(const MutableBufferSequence& buffers, - endpoint_type& sender_endpoint, - ASIO_MOVE_ARG(ReadHandler) handler) - { - // If you get an error on the following line it means that your handler does - // not meet the documented type requirements for a ReadHandler.
- ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; - - return this->get_service().async_receive_from( - this->get_implementation(), buffers, sender_endpoint, 0, - ASIO_MOVE_CAST(ReadHandler)(handler)); - } - - /// Start an asynchronous receive. - /** - * This function is used to asynchronously receive raw data. The function - * call always returns immediately. - * - * @param buffers One or more buffers into which the data will be received. - * Although the buffers object may be copied as necessary, ownership of the - * underlying memory blocks is retained by the caller, which must guarantee - * that they remain valid until the handler is called. - * - * @param sender_endpoint An endpoint object that receives the endpoint of - * the remote sender of the data. Ownership of the sender_endpoint object - * is retained by the caller, which must guarantee that it is valid until the - * handler is called. - * - * @param flags Flags specifying how the receive call is to be made. - * - * @param handler The handler to be called when the receive operation - * completes. Copies will be made of the handler as required. The function - * signature of the handler must be: - * @code void handler( - * const asio::error_code& error, // Result of operation. - * std::size_t bytes_transferred // Number of bytes received. - * ); @endcode - * Regardless of whether the asynchronous operation completes immediately or - * not, the handler will not be invoked from within this function. Invocation - * of the handler will be performed in a manner equivalent to using - * asio::io_service::post(). - */ - template - ASIO_INITFN_RESULT_TYPE(ReadHandler, - void (asio::error_code, std::size_t)) - async_receive_from(const MutableBufferSequence& buffers, - endpoint_type& sender_endpoint, socket_base::message_flags flags, - ASIO_MOVE_ARG(ReadHandler) handler) - { - // If you get an error on the following line it means that your handler does - // not meet the documented type requirements for a ReadHandler. - ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; - - return this->get_service().async_receive_from( - this->get_implementation(), buffers, sender_endpoint, flags, - ASIO_MOVE_CAST(ReadHandler)(handler)); - } -}; - -} // namespace asio - -#include "asio/detail/pop_options.hpp" - -#endif // ASIO_BASIC_RAW_SOCKET_HPP diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_seq_packet_socket.hpp b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_seq_packet_socket.hpp deleted file mode 100644 index a3d720ebe0b84..0000000000000 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_seq_packet_socket.hpp +++ /dev/null @@ -1,565 +0,0 @@ -// -// basic_seq_packet_socket.hpp -// ~~~~~~~~~~~~~~~~~~~~~~~~~~~ -// -// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com) -// -// Distributed under the Boost Software License, Version 1.0. 
(See accompanying -// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -// - -#ifndef ASIO_BASIC_SEQ_PACKET_SOCKET_HPP -#define ASIO_BASIC_SEQ_PACKET_SOCKET_HPP - -#if defined(_MSC_VER) && (_MSC_VER >= 1200) -# pragma once -#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) - -#include "asio/detail/config.hpp" -#include -#include "asio/basic_socket.hpp" -#include "asio/detail/handler_type_requirements.hpp" -#include "asio/detail/throw_error.hpp" -#include "asio/error.hpp" -#include "asio/seq_packet_socket_service.hpp" - -#include "asio/detail/push_options.hpp" - -namespace asio { - -/// Provides sequenced packet socket functionality. -/** - * The basic_seq_packet_socket class template provides asynchronous and blocking - * sequenced packet socket functionality. - * - * @par Thread Safety - * @e Distinct @e objects: Safe.@n - * @e Shared @e objects: Unsafe. - */ -template > -class basic_seq_packet_socket - : public basic_socket -{ -public: - /// (Deprecated: Use native_handle_type.) The native representation of a - /// socket. - typedef typename SeqPacketSocketService::native_handle_type native_type; - - /// The native representation of a socket. - typedef typename SeqPacketSocketService::native_handle_type - native_handle_type; - - /// The protocol type. - typedef Protocol protocol_type; - - /// The endpoint type. - typedef typename Protocol::endpoint endpoint_type; - - /// Construct a basic_seq_packet_socket without opening it. - /** - * This constructor creates a sequenced packet socket without opening it. The - * socket needs to be opened and then connected or accepted before data can - * be sent or received on it. - * - * @param io_service The io_service object that the sequenced packet socket - * will use to dispatch handlers for any asynchronous operations performed on - * the socket. - */ - explicit basic_seq_packet_socket(asio::io_service& io_service) - : basic_socket(io_service) - { - } - - /// Construct and open a basic_seq_packet_socket. - /** - * This constructor creates and opens a sequenced packet socket. The socket - * needs to be connected or accepted before data can be sent or received on - * it. - * - * @param io_service The io_service object that the sequenced packet socket - * will use to dispatch handlers for any asynchronous operations performed on - * the socket. - * - * @param protocol An object specifying protocol parameters to be used. - * - * @throws asio::system_error Thrown on failure. - */ - basic_seq_packet_socket(asio::io_service& io_service, - const protocol_type& protocol) - : basic_socket(io_service, protocol) - { - } - - /// Construct a basic_seq_packet_socket, opening it and binding it to the - /// given local endpoint. - /** - * This constructor creates a sequenced packet socket and automatically opens - * it bound to the specified endpoint on the local machine. The protocol used - * is the protocol associated with the given endpoint. - * - * @param io_service The io_service object that the sequenced packet socket - * will use to dispatch handlers for any asynchronous operations performed on - * the socket. - * - * @param endpoint An endpoint on the local machine to which the sequenced - * packet socket will be bound. - * - * @throws asio::system_error Thrown on failure. - */ - basic_seq_packet_socket(asio::io_service& io_service, - const endpoint_type& endpoint) - : basic_socket(io_service, endpoint) - { - } - - /// Construct a basic_seq_packet_socket on an existing native socket.
- /** - * This constructor creates a sequenced packet socket object to hold an - * existing native socket. - * - * @param io_service The io_service object that the sequenced packet socket - * will use to dispatch handlers for any asynchronous operations performed on - * the socket. - * - * @param protocol An object specifying protocol parameters to be used. - * - * @param native_socket The new underlying socket implementation. - * - * @throws asio::system_error Thrown on failure. - */ - basic_seq_packet_socket(asio::io_service& io_service, - const protocol_type& protocol, const native_handle_type& native_socket) - : basic_socket( - io_service, protocol, native_socket) - { - } - -#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) - /// Move-construct a basic_seq_packet_socket from another. - /** - * This constructor moves a sequenced packet socket from one object to - * another. - * - * @param other The other basic_seq_packet_socket object from which the move - * will occur. - * - * @note Following the move, the moved-from object is in the same state as if - * constructed using the @c basic_seq_packet_socket(io_service&) constructor. - */ - basic_seq_packet_socket(basic_seq_packet_socket&& other) - : basic_socket( - ASIO_MOVE_CAST(basic_seq_packet_socket)(other)) - { - } - - /// Move-assign a basic_seq_packet_socket from another. - /** - * This assignment operator moves a sequenced packet socket from one object to - * another. - * - * @param other The other basic_seq_packet_socket object from which the move - * will occur. - * - * @note Following the move, the moved-from object is in the same state as if - * constructed using the @c basic_seq_packet_socket(io_service&) constructor. - */ - basic_seq_packet_socket& operator=(basic_seq_packet_socket&& other) - { - basic_socket::operator=( - ASIO_MOVE_CAST(basic_seq_packet_socket)(other)); - return *this; - } - - /// Move-construct a basic_seq_packet_socket from a socket of another protocol - /// type. - /** - * This constructor moves a sequenced packet socket from one object to - * another. - * - * @param other The other basic_seq_packet_socket object from which the move - * will occur. - * - * @note Following the move, the moved-from object is in the same state as if - * constructed using the @c basic_seq_packet_socket(io_service&) constructor. - */ - template - basic_seq_packet_socket( - basic_seq_packet_socket&& other, - typename enable_if::value>::type* = 0) - : basic_socket( - ASIO_MOVE_CAST2(basic_seq_packet_socket< - Protocol1, SeqPacketSocketService1>)(other)) - { - } - - /// Move-assign a basic_seq_packet_socket from a socket of another protocol - /// type. - /** - * This assignment operator moves a sequenced packet socket from one object to - * another. - * - * @param other The other basic_seq_packet_socket object from which the move - * will occur. - * - * @note Following the move, the moved-from object is in the same state as if - * constructed using the @c basic_seq_packet_socket(io_service&) constructor. - */ - template - typename enable_if::value, - basic_seq_packet_socket>::type& operator=( - basic_seq_packet_socket&& other) - { - basic_socket::operator=( - ASIO_MOVE_CAST2(basic_seq_packet_socket< - Protocol1, SeqPacketSocketService1>)(other)); - return *this; - } -#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) - - /// Send some data on the socket. - /** - * This function is used to send data on the sequenced packet socket. 
The - * function call will block until the data has been sent successfully, or - * until an error occurs. - * - * @param buffers One or more data buffers to be sent on the socket. - * - * @param flags Flags specifying how the send call is to be made. - * - * @returns The number of bytes sent. - * - * @throws asio::system_error Thrown on failure. - * - * @par Example - * To send a single data buffer use the @ref buffer function as follows: - * @code - * socket.send(asio::buffer(data, size), 0); - * @endcode - * See the @ref buffer documentation for information on sending multiple - * buffers in one go, and how to use it with arrays, boost::array or - * std::vector. - */ - template - std::size_t send(const ConstBufferSequence& buffers, - socket_base::message_flags flags) - { - asio::error_code ec; - std::size_t s = this->get_service().send( - this->get_implementation(), buffers, flags, ec); - asio::detail::throw_error(ec, "send"); - return s; - } - - /// Send some data on the socket. - /** - * This function is used to send data on the sequenced packet socket. The - * function call will block until the data has been sent successfully, or - * until an error occurs. - * - * @param buffers One or more data buffers to be sent on the socket. - * - * @param flags Flags specifying how the send call is to be made. - * - * @param ec Set to indicate what error occurred, if any. - * - * @returns The number of bytes sent. Returns 0 if an error occurred. - * - * @note The send operation may not transmit all of the data to the peer. - * Consider using the @ref write function if you need to ensure that all data - * is written before the blocking operation completes. - */ - template - std::size_t send(const ConstBufferSequence& buffers, - socket_base::message_flags flags, asio::error_code& ec) - { - return this->get_service().send( - this->get_implementation(), buffers, flags, ec); - } - - /// Start an asynchronous send. - /** - * This function is used to asynchronously send data on the sequenced packet - * socket. The function call always returns immediately. - * - * @param buffers One or more data buffers to be sent on the socket. Although - * the buffers object may be copied as necessary, ownership of the underlying - * memory blocks is retained by the caller, which must guarantee that they - * remain valid until the handler is called. - * - * @param flags Flags specifying how the send call is to be made. - * - * @param handler The handler to be called when the send operation completes. - * Copies will be made of the handler as required. The function signature of - * the handler must be: - * @code void handler( - * const asio::error_code& error, // Result of operation. - * std::size_t bytes_transferred // Number of bytes sent. - * ); @endcode - * Regardless of whether the asynchronous operation completes immediately or - * not, the handler will not be invoked from within this function. Invocation - * of the handler will be performed in a manner equivalent to using - * asio::io_service::post(). - * - * @par Example - * To send a single data buffer use the @ref buffer function as follows: - * @code - * socket.async_send(asio::buffer(data, size), 0, handler); - * @endcode - * See the @ref buffer documentation for information on sending multiple - * buffers in one go, and how to use it with arrays, boost::array or - * std::vector.
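- *
- * @par Example (editorial sketch)
- * A hedged sketch of an asynchronous record send; it assumes @c sock is
- * an already-connected sequenced-packet socket (for instance an AF_UNIX
- * SOCK_SEQPACKET socket on Linux) and that @c data outlives the
- * operation. The helper name is an assumption for illustration:
- * @code
- * template <typename SeqPacketSocket, typename Buffers>
- * void send_record(SeqPacketSocket& sock, const Buffers& data)
- * {
- *   // Flags 0 requests default behaviour.
- *   sock.async_send(data, 0,
- *       [](const asio::error_code& ec, std::size_t n)
- *       {
- *         // Invoked once the whole record of n bytes has been
- *         // queued, or with ec set on failure.
- *         (void)ec; (void)n;
- *       });
- * }
- * @endcode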
- */ - template - ASIO_INITFN_RESULT_TYPE(WriteHandler, - void (asio::error_code, std::size_t)) - async_send(const ConstBufferSequence& buffers, - socket_base::message_flags flags, - ASIO_MOVE_ARG(WriteHandler) handler) - { - // If you get an error on the following line it means that your handler does - // not meet the documented type requirements for a WriteHandler. - ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; - - return this->get_service().async_send(this->get_implementation(), - buffers, flags, ASIO_MOVE_CAST(WriteHandler)(handler)); - } - - /// Receive some data on the socket. - /** - * This function is used to receive data on the sequenced packet socket. The - * function call will block until data has been received successfully, or - * until an error occurs. - * - * @param buffers One or more buffers into which the data will be received. - * - * @param out_flags After the receive call completes, contains flags - * associated with the received data. For example, if the - * socket_base::message_end_of_record bit is set then the received data marks - * the end of a record. - * - * @returns The number of bytes received. - * - * @throws asio::system_error Thrown on failure. An error code of - * asio::error::eof indicates that the connection was closed by the - * peer. - * - * @par Example - * To receive into a single data buffer use the @ref buffer function as - * follows: - * @code - * socket.receive(asio::buffer(data, size), out_flags); - * @endcode - * See the @ref buffer documentation for information on receiving into - * multiple buffers in one go, and how to use it with arrays, boost::array or - * std::vector. - */ - template - std::size_t receive(const MutableBufferSequence& buffers, - socket_base::message_flags& out_flags) - { - asio::error_code ec; - std::size_t s = this->get_service().receive( - this->get_implementation(), buffers, 0, out_flags, ec); - asio::detail::throw_error(ec, "receive"); - return s; - } - - /// Receive some data on the socket. - /** - * This function is used to receive data on the sequenced packet socket. The - * function call will block until data has been received successfully, or - * until an error occurs. - * - * @param buffers One or more buffers into which the data will be received. - * - * @param in_flags Flags specifying how the receive call is to be made. - * - * @param out_flags After the receive call completes, contains flags - * associated with the received data. For example, if the - * socket_base::message_end_of_record bit is set then the received data marks - * the end of a record. - * - * @returns The number of bytes received. - * - * @throws asio::system_error Thrown on failure. An error code of - * asio::error::eof indicates that the connection was closed by the - * peer. - * - * @note The receive operation may not receive all of the requested number of - * bytes. Consider using the @ref read function if you need to ensure that the - * requested amount of data is read before the blocking operation completes. - * - * @par Example - * To receive into a single data buffer use the @ref buffer function as - * follows: - * @code - * socket.receive(asio::buffer(data, size), 0, out_flags); - * @endcode - * See the @ref buffer documentation for information on receiving into - * multiple buffers in one go, and how to use it with arrays, boost::array or - * std::vector. 
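- *
- * @par Example (editorial sketch)
- * A hedged sketch of inspecting @c out_flags after a blocking receive;
- * @c sock, the buffer and the helper name are assumptions, not part of
- * the original documentation:
- * @code
- * template <typename SeqPacketSocket>
- * std::size_t read_record(SeqPacketSocket& sock,
- *     char* buf, std::size_t len)
- * {
- *   asio::socket_base::message_flags out_flags;
- *   std::size_t n = sock.receive(asio::buffer(buf, len), 0, out_flags);
- *   if (out_flags & asio::socket_base::message_end_of_record)
- *   {
- *     // The received data completes a record.
- *   }
- *   return n;
- * }
- * @endcode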
- */ - template - std::size_t receive(const MutableBufferSequence& buffers, - socket_base::message_flags in_flags, - socket_base::message_flags& out_flags) - { - asio::error_code ec; - std::size_t s = this->get_service().receive( - this->get_implementation(), buffers, in_flags, out_flags, ec); - asio::detail::throw_error(ec, "receive"); - return s; - } - - /// Receive some data on a connected socket. - /** - * This function is used to receive data on the sequenced packet socket. The - * function call will block until data has been received successfully, or - * until an error occurs. - * - * @param buffers One or more buffers into which the data will be received. - * - * @param in_flags Flags specifying how the receive call is to be made. - * - * @param out_flags After the receive call completes, contains flags - * associated with the received data. For example, if the - * socket_base::message_end_of_record bit is set then the received data marks - * the end of a record. - * - * @param ec Set to indicate what error occurred, if any. - * - * @returns The number of bytes received. Returns 0 if an error occurred. - * - * @note The receive operation may not receive all of the requested number of - * bytes. Consider using the @ref read function if you need to ensure that the - * requested amount of data is read before the blocking operation completes. - */ - template - std::size_t receive(const MutableBufferSequence& buffers, - socket_base::message_flags in_flags, - socket_base::message_flags& out_flags, asio::error_code& ec) - { - return this->get_service().receive(this->get_implementation(), - buffers, in_flags, out_flags, ec); - } - - /// Start an asynchronous receive. - /** - * This function is used to asynchronously receive data from the sequenced - * packet socket. The function call always returns immediately. - * - * @param buffers One or more buffers into which the data will be received. - * Although the buffers object may be copied as necessary, ownership of the - * underlying memory blocks is retained by the caller, which must guarantee - * that they remain valid until the handler is called. - * - * @param out_flags Once the asynchronous operation completes, contains flags - * associated with the received data. For example, if the - * socket_base::message_end_of_record bit is set then the received data marks - * the end of a record. The caller must guarantee that the referenced - * variable remains valid until the handler is called. - * - * @param handler The handler to be called when the receive operation - * completes. Copies will be made of the handler as required. The function - * signature of the handler must be: - * @code void handler( - * const asio::error_code& error, // Result of operation. - * std::size_t bytes_transferred // Number of bytes received. - * ); @endcode - * Regardless of whether the asynchronous operation completes immediately or - * not, the handler will not be invoked from within this function. Invocation - * of the handler will be performed in a manner equivalent to using - * asio::io_service::post(). - * - * @par Example - * To receive into a single data buffer use the @ref buffer function as - * follows: - * @code - * socket.async_receive(asio::buffer(data, size), out_flags, handler); - * @endcode - * See the @ref buffer documentation for information on receiving into - * multiple buffers in one go, and how to use it with arrays, boost::array or - * std::vector. 
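- *
- * @par Example (editorial sketch)
- * Because @c out_flags must stay valid until the handler runs, one common
- * arrangement keeps it next to the socket. The struct below is a hedged
- * sketch; the names and the choice of asio::generic::seq_packet_protocol
- * as the instantiated protocol are assumptions for illustration:
- * @code
- * struct record_reader
- * {
- *   asio::generic::seq_packet_protocol::socket& sock; // assumed socket
- *   asio::socket_base::message_flags out_flags;
- *   char buf[512];
- *
- *   void start()
- *   {
- *     sock.async_receive(asio::buffer(buf), out_flags,
- *         [this](const asio::error_code& ec, std::size_t n)
- *         {
- *           // out_flags has been filled in by the time we get here.
- *           (void)ec; (void)n;
- *         });
- *   }
- * };
- * @endcode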
- */ - template - ASIO_INITFN_RESULT_TYPE(ReadHandler, - void (asio::error_code, std::size_t)) - async_receive(const MutableBufferSequence& buffers, - socket_base::message_flags& out_flags, - ASIO_MOVE_ARG(ReadHandler) handler) - { - // If you get an error on the following line it means that your handler does - // not meet the documented type requirements for a ReadHandler. - ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; - - return this->get_service().async_receive( - this->get_implementation(), buffers, 0, out_flags, - ASIO_MOVE_CAST(ReadHandler)(handler)); - } - - /// Start an asynchronous receive. - /** - * This function is used to asynchronously receive data from the sequenced - * packet socket. The function call always returns immediately. - * - * @param buffers One or more buffers into which the data will be received. - * Although the buffers object may be copied as necessary, ownership of the - * underlying memory blocks is retained by the caller, which must guarantee - * that they remain valid until the handler is called. - * - * @param in_flags Flags specifying how the receive call is to be made. - * - * @param out_flags Once the asynchronous operation completes, contains flags - * associated with the received data. For example, if the - * socket_base::message_end_of_record bit is set then the received data marks - * the end of a record. The caller must guarantee that the referenced - * variable remains valid until the handler is called. - * - * @param handler The handler to be called when the receive operation - * completes. Copies will be made of the handler as required. The function - * signature of the handler must be: - * @code void handler( - * const asio::error_code& error, // Result of operation. - * std::size_t bytes_transferred // Number of bytes received. - * ); @endcode - * Regardless of whether the asynchronous operation completes immediately or - * not, the handler will not be invoked from within this function. Invocation - * of the handler will be performed in a manner equivalent to using - * asio::io_service::post(). - * - * @par Example - * To receive into a single data buffer use the @ref buffer function as - * follows: - * @code - * socket.async_receive( - * asio::buffer(data, size), - * 0, out_flags, handler); - * @endcode - * See the @ref buffer documentation for information on receiving into - * multiple buffers in one go, and how to use it with arrays, boost::array or - * std::vector. - */ - template - ASIO_INITFN_RESULT_TYPE(ReadHandler, - void (asio::error_code, std::size_t)) - async_receive(const MutableBufferSequence& buffers, - socket_base::message_flags in_flags, - socket_base::message_flags& out_flags, - ASIO_MOVE_ARG(ReadHandler) handler) - { - // If you get an error on the following line it means that your handler does - // not meet the documented type requirements for a ReadHandler.
- ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; - - return this->get_service().async_receive( - this->get_implementation(), buffers, in_flags, out_flags, - ASIO_MOVE_CAST(ReadHandler)(handler)); - } -}; - -} // namespace asio - -#include "asio/detail/pop_options.hpp" - -#endif // ASIO_BASIC_SEQ_PACKET_SOCKET_HPP diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_serial_port.hpp b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_serial_port.hpp deleted file mode 100644 index 007d293fed1db..0000000000000 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_serial_port.hpp +++ /dev/null @@ -1,695 +0,0 @@ -// -// basic_serial_port.hpp -// ~~~~~~~~~~~~~~~~~~~~~ -// -// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com) -// Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com) -// -// Distributed under the Boost Software License, Version 1.0. (See accompanying -// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -// - -#ifndef ASIO_BASIC_SERIAL_PORT_HPP -#define ASIO_BASIC_SERIAL_PORT_HPP - -#if defined(_MSC_VER) && (_MSC_VER >= 1200) -# pragma once -#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) - -#include "asio/detail/config.hpp" - -#if defined(ASIO_HAS_SERIAL_PORT) \ - || defined(GENERATING_DOCUMENTATION) - -#include -#include "asio/basic_io_object.hpp" -#include "asio/detail/handler_type_requirements.hpp" -#include "asio/detail/throw_error.hpp" -#include "asio/error.hpp" -#include "asio/serial_port_base.hpp" -#include "asio/serial_port_service.hpp" - -#include "asio/detail/push_options.hpp" - -namespace asio { - -/// Provides serial port functionality. -/** - * The basic_serial_port class template provides functionality that is common - * to all serial ports. - * - * @par Thread Safety - * @e Distinct @e objects: Safe.@n - * @e Shared @e objects: Unsafe. - */ -template -class basic_serial_port - : public basic_io_object, - public serial_port_base -{ -public: - /// (Deprecated: Use native_handle_type.) The native representation of a - /// serial port. - typedef typename SerialPortService::native_handle_type native_type; - - /// The native representation of a serial port. - typedef typename SerialPortService::native_handle_type native_handle_type; - - /// A basic_serial_port is always the lowest layer. - typedef basic_serial_port lowest_layer_type; - - /// Construct a basic_serial_port without opening it. - /** - * This constructor creates a serial port without opening it. - * - * @param io_service The io_service object that the serial port will use to - * dispatch handlers for any asynchronous operations performed on the port. - */ - explicit basic_serial_port(asio::io_service& io_service) - : basic_io_object(io_service) - { - } - - /// Construct and open a basic_serial_port. - /** - * This constructor creates and opens a serial port for the specified device - * name. - * - * @param io_service The io_service object that the serial port will use to - * dispatch handlers for any asynchronous operations performed on the port. - * - * @param device The platform-specific device name for this serial - * port. 
- */ - explicit basic_serial_port(asio::io_service& io_service, - const char* device) - : basic_io_object(io_service) - { - asio::error_code ec; - this->get_service().open(this->get_implementation(), device, ec); - asio::detail::throw_error(ec, "open"); - } - - /// Construct and open a basic_serial_port. - /** - * This constructor creates and opens a serial port for the specified device - * name. - * - * @param io_service The io_service object that the serial port will use to - * dispatch handlers for any asynchronous operations performed on the port. - * - * @param device The platform-specific device name for this serial - * port. - */ - explicit basic_serial_port(asio::io_service& io_service, - const std::string& device) - : basic_io_object(io_service) - { - asio::error_code ec; - this->get_service().open(this->get_implementation(), device, ec); - asio::detail::throw_error(ec, "open"); - } - - /// Construct a basic_serial_port on an existing native serial port. - /** - * This constructor creates a serial port object to hold an existing native - * serial port. - * - * @param io_service The io_service object that the serial port will use to - * dispatch handlers for any asynchronous operations performed on the port. - * - * @param native_serial_port A native serial port. - * - * @throws asio::system_error Thrown on failure. - */ - basic_serial_port(asio::io_service& io_service, - const native_handle_type& native_serial_port) - : basic_io_object(io_service) - { - asio::error_code ec; - this->get_service().assign(this->get_implementation(), - native_serial_port, ec); - asio::detail::throw_error(ec, "assign"); - } - -#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) - /// Move-construct a basic_serial_port from another. - /** - * This constructor moves a serial port from one object to another. - * - * @param other The other basic_serial_port object from which the move will - * occur. - * - * @note Following the move, the moved-from object is in the same state as if - * constructed using the @c basic_serial_port(io_service&) constructor. - */ - basic_serial_port(basic_serial_port&& other) - : basic_io_object( - ASIO_MOVE_CAST(basic_serial_port)(other)) - { - } - - /// Move-assign a basic_serial_port from another. - /** - * This assignment operator moves a serial port from one object to another. - * - * @param other The other basic_serial_port object from which the move will - * occur. - * - * @note Following the move, the moved-from object is in the same state as if - * constructed using the @c basic_serial_port(io_service&) constructor. - */ - basic_serial_port& operator=(basic_serial_port&& other) - { - basic_io_object::operator=( - ASIO_MOVE_CAST(basic_serial_port)(other)); - return *this; - } -#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) - - /// Get a reference to the lowest layer. - /** - * This function returns a reference to the lowest layer in a stack of - * layers. Since a basic_serial_port cannot contain any further layers, it - * simply returns a reference to itself. - * - * @return A reference to the lowest layer in the stack of layers. Ownership - * is not transferred to the caller. - */ - lowest_layer_type& lowest_layer() - { - return *this; - } - - /// Get a const reference to the lowest layer. - /** - * This function returns a const reference to the lowest layer in a stack of - * layers. Since a basic_serial_port cannot contain any further layers, it - * simply returns a reference to itself. 
- * - * @return A const reference to the lowest layer in the stack of layers. - * Ownership is not transferred to the caller. - */ - const lowest_layer_type& lowest_layer() const - { - return *this; - } - - /// Open the serial port using the specified device name. - /** - * This function opens the serial port for the specified device name. - * - * @param device The platform-specific device name. - * - * @throws asio::system_error Thrown on failure. - */ - void open(const std::string& device) - { - asio::error_code ec; - this->get_service().open(this->get_implementation(), device, ec); - asio::detail::throw_error(ec, "open"); - } - - /// Open the serial port using the specified device name. - /** - * This function opens the serial port using the given platform-specific - * device name. - * - * @param device The platform-specific device name. - * - * @param ec Set to indicate what error occurred, if any. - */ - asio::error_code open(const std::string& device, - asio::error_code& ec) - { - return this->get_service().open(this->get_implementation(), device, ec); - } - - /// Assign an existing native serial port to the serial port. - /* - * This function opens the serial port to hold an existing native serial port. - * - * @param native_serial_port A native serial port. - * - * @throws asio::system_error Thrown on failure. - */ - void assign(const native_handle_type& native_serial_port) - { - asio::error_code ec; - this->get_service().assign(this->get_implementation(), - native_serial_port, ec); - asio::detail::throw_error(ec, "assign"); - } - - /// Assign an existing native serial port to the serial port. - /* - * This function opens the serial port to hold an existing native serial port. - * - * @param native_serial_port A native serial port. - * - * @param ec Set to indicate what error occurred, if any. - */ - asio::error_code assign(const native_handle_type& native_serial_port, - asio::error_code& ec) - { - return this->get_service().assign(this->get_implementation(), - native_serial_port, ec); - } - - /// Determine whether the serial port is open. - bool is_open() const - { - return this->get_service().is_open(this->get_implementation()); - } - - /// Close the serial port. - /** - * This function is used to close the serial port. Any asynchronous read or - * write operations will be cancelled immediately, and will complete with the - * asio::error::operation_aborted error. - * - * @throws asio::system_error Thrown on failure. - */ - void close() - { - asio::error_code ec; - this->get_service().close(this->get_implementation(), ec); - asio::detail::throw_error(ec, "close"); - } - - /// Close the serial port. - /** - * This function is used to close the serial port. Any asynchronous read or - * write operations will be cancelled immediately, and will complete with the - * asio::error::operation_aborted error. - * - * @param ec Set to indicate what error occurred, if any. - */ - asio::error_code close(asio::error_code& ec) - { - return this->get_service().close(this->get_implementation(), ec); - } - - /// (Deprecated: Use native_handle().) Get the native serial port - /// representation. - /** - * This function may be used to obtain the underlying representation of the - * serial port. This is intended to allow access to native serial port - * functionality that is not otherwise provided. - */ - native_type native() - { - return this->get_service().native_handle(this->get_implementation()); - } - - /// Get the native serial port representation.
- /** - * This function may be used to obtain the underlying representation of the - * serial port. This is intended to allow access to native serial port - * functionality that is not otherwise provided. - */ - native_handle_type native_handle() - { - return this->get_service().native_handle(this->get_implementation()); - } - - /// Cancel all asynchronous operations associated with the serial port. - /** - * This function causes all outstanding asynchronous read or write operations - * to finish immediately, and the handlers for cancelled operations will be - * passed the asio::error::operation_aborted error. - * - * @throws asio::system_error Thrown on failure. - */ - void cancel() - { - asio::error_code ec; - this->get_service().cancel(this->get_implementation(), ec); - asio::detail::throw_error(ec, "cancel"); - } - - /// Cancel all asynchronous operations associated with the serial port. - /** - * This function causes all outstanding asynchronous read or write operations - * to finish immediately, and the handlers for cancelled operations will be - * passed the asio::error::operation_aborted error. - * - * @param ec Set to indicate what error occurred, if any. - */ - asio::error_code cancel(asio::error_code& ec) - { - return this->get_service().cancel(this->get_implementation(), ec); - } - - /// Send a break sequence to the serial port. - /** - * This function causes a break sequence of platform-specific duration to be - * sent out the serial port. - * - * @throws asio::system_error Thrown on failure. - */ - void send_break() - { - asio::error_code ec; - this->get_service().send_break(this->get_implementation(), ec); - asio::detail::throw_error(ec, "send_break"); - } - - /// Send a break sequence to the serial port. - /** - * This function causes a break sequence of platform-specific duration to be - * sent out the serial port. - * - * @param ec Set to indicate what error occurred, if any. - */ - asio::error_code send_break(asio::error_code& ec) - { - return this->get_service().send_break(this->get_implementation(), ec); - } - - /// Set an option on the serial port. - /** - * This function is used to set an option on the serial port. - * - * @param option The option value to be set on the serial port. - * - * @throws asio::system_error Thrown on failure. - * - * @sa SettableSerialPortOption @n - * asio::serial_port_base::baud_rate @n - * asio::serial_port_base::flow_control @n - * asio::serial_port_base::parity @n - * asio::serial_port_base::stop_bits @n - * asio::serial_port_base::character_size - */ - template - void set_option(const SettableSerialPortOption& option) - { - asio::error_code ec; - this->get_service().set_option(this->get_implementation(), option, ec); - asio::detail::throw_error(ec, "set_option"); - } - - /// Set an option on the serial port. - /** - * This function is used to set an option on the serial port. - * - * @param option The option value to be set on the serial port. - * - * @param ec Set to indicate what error occurred, if any. - * - * @sa SettableSerialPortOption @n - * asio::serial_port_base::baud_rate @n - * asio::serial_port_base::flow_control @n - * asio::serial_port_base::parity @n - * asio::serial_port_base::stop_bits @n - * asio::serial_port_base::character_size - */ - template - asio::error_code set_option(const SettableSerialPortOption& option, - asio::error_code& ec) - { - return this->get_service().set_option( - this->get_implementation(), option, ec); - } - - /// Get an option from the serial port. 
- /** - * This function is used to get the current value of an option on the serial - * port. - * - * @param option The option value to be obtained from the serial port. - * - * @throws asio::system_error Thrown on failure. - * - * @sa GettableSerialPortOption @n - * asio::serial_port_base::baud_rate @n - * asio::serial_port_base::flow_control @n - * asio::serial_port_base::parity @n - * asio::serial_port_base::stop_bits @n - * asio::serial_port_base::character_size - */ - template - void get_option(GettableSerialPortOption& option) - { - asio::error_code ec; - this->get_service().get_option(this->get_implementation(), option, ec); - asio::detail::throw_error(ec, "get_option"); - } - - /// Get an option from the serial port. - /** - * This function is used to get the current value of an option on the serial - * port. - * - * @param option The option value to be obtained from the serial port. - * - * @param ec Set to indicate what error occurred, if any. - * - * @sa GettableSerialPortOption @n - * asio::serial_port_base::baud_rate @n - * asio::serial_port_base::flow_control @n - * asio::serial_port_base::parity @n - * asio::serial_port_base::stop_bits @n - * asio::serial_port_base::character_size - */ - template - asio::error_code get_option(GettableSerialPortOption& option, - asio::error_code& ec) - { - return this->get_service().get_option( - this->get_implementation(), option, ec); - } - - /// Write some data to the serial port. - /** - * This function is used to write data to the serial port. The function call - * will block until one or more bytes of the data have been written - * successfully, or until an error occurs. - * - * @param buffers One or more data buffers to be written to the serial port. - * - * @returns The number of bytes written. - * - * @throws asio::system_error Thrown on failure. An error code of - * asio::error::eof indicates that the connection was closed by the - * peer. - * - * @note The write_some operation may not transmit all of the data to the - * peer. Consider using the @ref write function if you need to ensure that - * all data is written before the blocking operation completes. - * - * @par Example - * To write a single data buffer use the @ref buffer function as follows: - * @code - * serial_port.write_some(asio::buffer(data, size)); - * @endcode - * See the @ref buffer documentation for information on writing multiple - * buffers in one go, and how to use it with arrays, boost::array or - * std::vector. - */ - template - std::size_t write_some(const ConstBufferSequence& buffers) - { - asio::error_code ec; - std::size_t s = this->get_service().write_some( - this->get_implementation(), buffers, ec); - asio::detail::throw_error(ec, "write_some"); - return s; - } - - /// Write some data to the serial port. - /** - * This function is used to write data to the serial port. The function call - * will block until one or more bytes of the data have been written - * successfully, or until an error occurs. - * - * @param buffers One or more data buffers to be written to the serial port. - * - * @param ec Set to indicate what error occurred, if any. - * - * @returns The number of bytes written. Returns 0 if an error occurred. - * - * @note The write_some operation may not transmit all of the data to the - * peer. Consider using the @ref write function if you need to ensure that - * all data is written before the blocking operation completes.
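- *
- * @par Example (editorial sketch)
- * A hedged sketch of the @ref write alternative mentioned above, which
- * loops internally until every byte has been written or an error occurs;
- * @c port, @c data and the helper name are assumptions:
- * @code
- * void send_all(asio::serial_port& port, const std::string& data)
- * {
- *   // Composed operation: calls write_some repeatedly until the
- *   // whole buffer has been transferred; throws on failure.
- *   asio::write(port, asio::buffer(data));
- * }
- * @endcode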
-   */
-  template <typename ConstBufferSequence>
-  std::size_t write_some(const ConstBufferSequence& buffers,
-      asio::error_code& ec)
-  {
-    return this->get_service().write_some(
-        this->get_implementation(), buffers, ec);
-  }
-
-  /// Start an asynchronous write.
-  /**
-   * This function is used to asynchronously write data to the serial port.
-   * The function call always returns immediately.
-   *
-   * @param buffers One or more data buffers to be written to the serial port.
-   * Although the buffers object may be copied as necessary, ownership of the
-   * underlying memory blocks is retained by the caller, which must guarantee
-   * that they remain valid until the handler is called.
-   *
-   * @param handler The handler to be called when the write operation completes.
-   * Copies will be made of the handler as required. The function signature of
-   * the handler must be:
-   * @code void handler(
-   *   const asio::error_code& error, // Result of operation.
-   *   std::size_t bytes_transferred // Number of bytes written.
-   * ); @endcode
-   * Regardless of whether the asynchronous operation completes immediately or
-   * not, the handler will not be invoked from within this function. Invocation
-   * of the handler will be performed in a manner equivalent to using
-   * asio::io_service::post().
-   *
-   * @note The write operation may not transmit all of the data to the peer.
-   * Consider using the @ref async_write function if you need to ensure that all
-   * data is written before the asynchronous operation completes.
-   *
-   * @par Example
-   * To write a single data buffer use the @ref buffer function as follows:
-   * @code
-   * serial_port.async_write_some(asio::buffer(data, size), handler);
-   * @endcode
-   * See the @ref buffer documentation for information on writing multiple
-   * buffers in one go, and how to use it with arrays, boost::array or
-   * std::vector.
-   */
-  template <typename ConstBufferSequence, typename WriteHandler>
-  ASIO_INITFN_RESULT_TYPE(WriteHandler,
-      void (asio::error_code, std::size_t))
-  async_write_some(const ConstBufferSequence& buffers,
-      ASIO_MOVE_ARG(WriteHandler) handler)
-  {
-    // If you get an error on the following line it means that your handler does
-    // not meet the documented type requirements for a WriteHandler.
-    ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check;
-
-    return this->get_service().async_write_some(this->get_implementation(),
-        buffers, ASIO_MOVE_CAST(WriteHandler)(handler));
-  }
-
-  /// Read some data from the serial port.
-  /**
-   * This function is used to read data from the serial port. The function
-   * call will block until one or more bytes of data has been read successfully,
-   * or until an error occurs.
-   *
-   * @param buffers One or more buffers into which the data will be read.
-   *
-   * @returns The number of bytes read.
-   *
-   * @throws asio::system_error Thrown on failure. An error code of
-   * asio::error::eof indicates that the connection was closed by the
-   * peer.
-   *
-   * @note The read_some operation may not read all of the requested number of
-   * bytes. Consider using the @ref read function if you need to ensure that
-   * the requested amount of data is read before the blocking operation
-   * completes.
-   *
-   * @par Example
-   * To read into a single data buffer use the @ref buffer function as follows:
-   * @code
-   * serial_port.read_some(asio::buffer(data, size));
-   * @endcode
-   * See the @ref buffer documentation for information on reading into multiple
-   * buffers in one go, and how to use it with arrays, boost::array or
-   * std::vector.
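-   *
-   * To read an exact number of bytes, the composed @ref read function can be
-   * used instead (a sketch; @c data and @c size are placeholders):
-   * @code
-   * asio::read(serial_port, asio::buffer(data, size));
-   * @endcode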
-   */
-  template <typename MutableBufferSequence>
-  std::size_t read_some(const MutableBufferSequence& buffers)
-  {
-    asio::error_code ec;
-    std::size_t s = this->get_service().read_some(
-        this->get_implementation(), buffers, ec);
-    asio::detail::throw_error(ec, "read_some");
-    return s;
-  }
-
-  /// Read some data from the serial port.
-  /**
-   * This function is used to read data from the serial port. The function
-   * call will block until one or more bytes of data has been read successfully,
-   * or until an error occurs.
-   *
-   * @param buffers One or more buffers into which the data will be read.
-   *
-   * @param ec Set to indicate what error occurred, if any.
-   *
-   * @returns The number of bytes read. Returns 0 if an error occurred.
-   *
-   * @note The read_some operation may not read all of the requested number of
-   * bytes. Consider using the @ref read function if you need to ensure that
-   * the requested amount of data is read before the blocking operation
-   * completes.
-   */
-  template <typename MutableBufferSequence>
-  std::size_t read_some(const MutableBufferSequence& buffers,
-      asio::error_code& ec)
-  {
-    return this->get_service().read_some(
-        this->get_implementation(), buffers, ec);
-  }
-
-  /// Start an asynchronous read.
-  /**
-   * This function is used to asynchronously read data from the serial port.
-   * The function call always returns immediately.
-   *
-   * @param buffers One or more buffers into which the data will be read.
-   * Although the buffers object may be copied as necessary, ownership of the
-   * underlying memory blocks is retained by the caller, which must guarantee
-   * that they remain valid until the handler is called.
-   *
-   * @param handler The handler to be called when the read operation completes.
-   * Copies will be made of the handler as required. The function signature of
-   * the handler must be:
-   * @code void handler(
-   *   const asio::error_code& error, // Result of operation.
-   *   std::size_t bytes_transferred // Number of bytes read.
-   * ); @endcode
-   * Regardless of whether the asynchronous operation completes immediately or
-   * not, the handler will not be invoked from within this function. Invocation
-   * of the handler will be performed in a manner equivalent to using
-   * asio::io_service::post().
-   *
-   * @note The read operation may not read all of the requested number of bytes.
-   * Consider using the @ref async_read function if you need to ensure that the
-   * requested amount of data is read before the asynchronous operation
-   * completes.
-   *
-   * @par Example
-   * To read into a single data buffer use the @ref buffer function as follows:
-   * @code
-   * serial_port.async_read_some(asio::buffer(data, size), handler);
-   * @endcode
-   * See the @ref buffer documentation for information on reading into multiple
-   * buffers in one go, and how to use it with arrays, boost::array or
-   * std::vector.
-   */
-  template <typename MutableBufferSequence, typename ReadHandler>
-  ASIO_INITFN_RESULT_TYPE(ReadHandler,
-      void (asio::error_code, std::size_t))
-  async_read_some(const MutableBufferSequence& buffers,
-      ASIO_MOVE_ARG(ReadHandler) handler)
-  {
-    // If you get an error on the following line it means that your handler does
-    // not meet the documented type requirements for a ReadHandler.
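-    // A conforming handler is callable as
-    // handler(const asio::error_code&, std::size_t).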
- ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; - - return this->get_service().async_read_some(this->get_implementation(), - buffers, ASIO_MOVE_CAST(ReadHandler)(handler)); - } -}; - -} // namespace asio - -#include "asio/detail/pop_options.hpp" - -#endif // defined(ASIO_HAS_SERIAL_PORT) - // || defined(GENERATING_DOCUMENTATION) - -#endif // ASIO_BASIC_SERIAL_PORT_HPP diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_signal_set.hpp b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_signal_set.hpp deleted file mode 100644 index 2dd71ceec7011..0000000000000 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_signal_set.hpp +++ /dev/null @@ -1,384 +0,0 @@ -// -// basic_signal_set.hpp -// ~~~~~~~~~~~~~~~~~~~~ -// -// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com) -// -// Distributed under the Boost Software License, Version 1.0. (See accompanying -// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -// - -#ifndef ASIO_BASIC_SIGNAL_SET_HPP -#define ASIO_BASIC_SIGNAL_SET_HPP - -#if defined(_MSC_VER) && (_MSC_VER >= 1200) -# pragma once -#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) - -#include "asio/detail/config.hpp" - -#include "asio/basic_io_object.hpp" -#include "asio/detail/handler_type_requirements.hpp" -#include "asio/detail/throw_error.hpp" -#include "asio/error.hpp" -#include "asio/signal_set_service.hpp" - -#include "asio/detail/push_options.hpp" - -namespace asio { - -/// Provides signal functionality. -/** - * The basic_signal_set class template provides the ability to perform an - * asynchronous wait for one or more signals to occur. - * - * Most applications will use the asio::signal_set typedef. - * - * @par Thread Safety - * @e Distinct @e objects: Safe.@n - * @e Shared @e objects: Unsafe. - * - * @par Example - * Performing an asynchronous wait: - * @code - * void handler( - * const asio::error_code& error, - * int signal_number) - * { - * if (!error) - * { - * // A signal occurred. - * } - * } - * - * ... - * - * // Construct a signal set registered for process termination. - * asio::signal_set signals(io_service, SIGINT, SIGTERM); - * - * // Start an asynchronous wait for one of the signals to occur. - * signals.async_wait(handler); - * @endcode - * - * @par Queueing of signal notifications - * - * If a signal is registered with a signal_set, and the signal occurs when - * there are no waiting handlers, then the signal notification is queued. The - * next async_wait operation on that signal_set will dequeue the notification. - * If multiple notifications are queued, subsequent async_wait operations - * dequeue them one at a time. Signal notifications are dequeued in order of - * ascending signal number. - * - * If a signal number is removed from a signal_set (using the @c remove or @c - * erase member functions) then any queued notifications for that signal are - * discarded. - * - * @par Multiple registration of signals - * - * The same signal number may be registered with different signal_set objects. - * When the signal occurs, one handler is called for each signal_set object. - * - * Note that multiple registration only works for signals that are registered - * using Asio. The application must not also register a signal handler using - * functions such as @c signal() or @c sigaction(). 
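- *
- * For example, if two signal sets both register SIGINT, a single SIGINT
- * invokes one waiting handler on each set (a sketch; @c handler is as in
- * the example above):
- * @code
- * asio::signal_set set1(io_service, SIGINT);
- * asio::signal_set set2(io_service, SIGINT);
- * set1.async_wait(handler);
- * set2.async_wait(handler);
- * @endcode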
- *
- * @par Signal masking on POSIX platforms
- *
- * POSIX allows signals to be blocked using functions such as @c sigprocmask()
- * and @c pthread_sigmask(). For signals to be delivered, programs must ensure
- * that any signals registered using signal_set objects are unblocked in at
- * least one thread.
- */
-template <typename SignalSetService = signal_set_service>
-class basic_signal_set
-  : public basic_io_object<SignalSetService>
-{
-public:
-  /// Construct a signal set without adding any signals.
-  /**
-   * This constructor creates a signal set without registering for any signals.
-   *
-   * @param io_service The io_service object that the signal set will use to
-   * dispatch handlers for any asynchronous operations performed on the set.
-   */
-  explicit basic_signal_set(asio::io_service& io_service)
-    : basic_io_object<SignalSetService>(io_service)
-  {
-  }
-
-  /// Construct a signal set and add one signal.
-  /**
-   * This constructor creates a signal set and registers for one signal.
-   *
-   * @param io_service The io_service object that the signal set will use to
-   * dispatch handlers for any asynchronous operations performed on the set.
-   *
-   * @param signal_number_1 The signal number to be added.
-   *
-   * @note This constructor is equivalent to performing:
-   * @code asio::signal_set signals(io_service);
-   * signals.add(signal_number_1); @endcode
-   */
-  basic_signal_set(asio::io_service& io_service, int signal_number_1)
-    : basic_io_object<SignalSetService>(io_service)
-  {
-    asio::error_code ec;
-    this->service.add(this->implementation, signal_number_1, ec);
-    asio::detail::throw_error(ec, "add");
-  }
-
-  /// Construct a signal set and add two signals.
-  /**
-   * This constructor creates a signal set and registers for two signals.
-   *
-   * @param io_service The io_service object that the signal set will use to
-   * dispatch handlers for any asynchronous operations performed on the set.
-   *
-   * @param signal_number_1 The first signal number to be added.
-   *
-   * @param signal_number_2 The second signal number to be added.
-   *
-   * @note This constructor is equivalent to performing:
-   * @code asio::signal_set signals(io_service);
-   * signals.add(signal_number_1);
-   * signals.add(signal_number_2); @endcode
-   */
-  basic_signal_set(asio::io_service& io_service, int signal_number_1,
-      int signal_number_2)
-    : basic_io_object<SignalSetService>(io_service)
-  {
-    asio::error_code ec;
-    this->service.add(this->implementation, signal_number_1, ec);
-    asio::detail::throw_error(ec, "add");
-    this->service.add(this->implementation, signal_number_2, ec);
-    asio::detail::throw_error(ec, "add");
-  }
-
-  /// Construct a signal set and add three signals.
-  /**
-   * This constructor creates a signal set and registers for three signals.
-   *
-   * @param io_service The io_service object that the signal set will use to
-   * dispatch handlers for any asynchronous operations performed on the set.
-   *
-   * @param signal_number_1 The first signal number to be added.
-   *
-   * @param signal_number_2 The second signal number to be added.
-   *
-   * @param signal_number_3 The third signal number to be added.
-   *
-   * @note This constructor is equivalent to performing:
-   * @code asio::signal_set signals(io_service);
-   * signals.add(signal_number_1);
-   * signals.add(signal_number_2);
-   * signals.add(signal_number_3); @endcode
-   */
-  basic_signal_set(asio::io_service& io_service, int signal_number_1,
-      int signal_number_2, int signal_number_3)
-    : basic_io_object<SignalSetService>(io_service)
-  {
-    asio::error_code ec;
-    this->service.add(this->implementation, signal_number_1, ec);
-    asio::detail::throw_error(ec, "add");
-    this->service.add(this->implementation, signal_number_2, ec);
-    asio::detail::throw_error(ec, "add");
-    this->service.add(this->implementation, signal_number_3, ec);
-    asio::detail::throw_error(ec, "add");
-  }
-
-  /// Add a signal to a signal_set.
-  /**
-   * This function adds the specified signal to the set. It has no effect if the
-   * signal is already in the set.
-   *
-   * @param signal_number The signal to be added to the set.
-   *
-   * @throws asio::system_error Thrown on failure.
-   */
-  void add(int signal_number)
-  {
-    asio::error_code ec;
-    this->service.add(this->implementation, signal_number, ec);
-    asio::detail::throw_error(ec, "add");
-  }
-
-  /// Add a signal to a signal_set.
-  /**
-   * This function adds the specified signal to the set. It has no effect if the
-   * signal is already in the set.
-   *
-   * @param signal_number The signal to be added to the set.
-   *
-   * @param ec Set to indicate what error occurred, if any.
-   */
-  asio::error_code add(int signal_number,
-      asio::error_code& ec)
-  {
-    return this->service.add(this->implementation, signal_number, ec);
-  }
-
-  /// Remove a signal from a signal_set.
-  /**
-   * This function removes the specified signal from the set. It has no effect
-   * if the signal is not in the set.
-   *
-   * @param signal_number The signal to be removed from the set.
-   *
-   * @throws asio::system_error Thrown on failure.
-   *
-   * @note Removes any notifications that have been queued for the specified
-   * signal number.
-   */
-  void remove(int signal_number)
-  {
-    asio::error_code ec;
-    this->service.remove(this->implementation, signal_number, ec);
-    asio::detail::throw_error(ec, "remove");
-  }
-
-  /// Remove a signal from a signal_set.
-  /**
-   * This function removes the specified signal from the set. It has no effect
-   * if the signal is not in the set.
-   *
-   * @param signal_number The signal to be removed from the set.
-   *
-   * @param ec Set to indicate what error occurred, if any.
-   *
-   * @note Removes any notifications that have been queued for the specified
-   * signal number.
-   */
-  asio::error_code remove(int signal_number,
-      asio::error_code& ec)
-  {
-    return this->service.remove(this->implementation, signal_number, ec);
-  }
-
-  /// Remove all signals from a signal_set.
-  /**
-   * This function removes all signals from the set. It has no effect if the set
-   * is already empty.
-   *
-   * @throws asio::system_error Thrown on failure.
-   *
-   * @note Removes all queued notifications.
-   */
-  void clear()
-  {
-    asio::error_code ec;
-    this->service.clear(this->implementation, ec);
-    asio::detail::throw_error(ec, "clear");
-  }
-
-  /// Remove all signals from a signal_set.
-  /**
-   * This function removes all signals from the set. It has no effect if the set
-   * is already empty.
-   *
-   * @param ec Set to indicate what error occurred, if any.
-   *
-   * @note Removes all queued notifications.
-   */
-  asio::error_code clear(asio::error_code& ec)
-  {
-    return this->service.clear(this->implementation, ec);
-  }
-
-  /// Cancel all operations associated with the signal set.
-  /**
-   * This function forces the completion of any pending asynchronous wait
-   * operations against the signal set. The handler for each cancelled
-   * operation will be invoked with the asio::error::operation_aborted
-   * error code.
-   *
-   * Cancellation does not alter the set of registered signals.
-   *
-   * @throws asio::system_error Thrown on failure.
-   *
-   * @note If a registered signal occurred before cancel() is called, then the
-   * handlers for asynchronous wait operations will:
-   *
-   * @li have already been invoked; or
-   *
-   * @li have been queued for invocation in the near future.
-   *
-   * These handlers can no longer be cancelled, and therefore are passed an
-   * error code that indicates the successful completion of the wait operation.
-   */
-  void cancel()
-  {
-    asio::error_code ec;
-    this->service.cancel(this->implementation, ec);
-    asio::detail::throw_error(ec, "cancel");
-  }
-
-  /// Cancel all operations associated with the signal set.
-  /**
-   * This function forces the completion of any pending asynchronous wait
-   * operations against the signal set. The handler for each cancelled
-   * operation will be invoked with the asio::error::operation_aborted
-   * error code.
-   *
-   * Cancellation does not alter the set of registered signals.
-   *
-   * @param ec Set to indicate what error occurred, if any.
-   *
-   * @note If a registered signal occurred before cancel() is called, then the
-   * handlers for asynchronous wait operations will:
-   *
-   * @li have already been invoked; or
-   *
-   * @li have been queued for invocation in the near future.
-   *
-   * These handlers can no longer be cancelled, and therefore are passed an
-   * error code that indicates the successful completion of the wait operation.
-   */
-  asio::error_code cancel(asio::error_code& ec)
-  {
-    return this->service.cancel(this->implementation, ec);
-  }
-
-  /// Start an asynchronous operation to wait for a signal to be delivered.
-  /**
-   * This function may be used to initiate an asynchronous wait against the
-   * signal set. It always returns immediately.
-   *
-   * For each call to async_wait(), the supplied handler will be called exactly
-   * once. The handler will be called when:
-   *
-   * @li One of the registered signals in the signal set occurs; or
-   *
-   * @li The signal set was cancelled, in which case the handler is passed the
-   * error code asio::error::operation_aborted.
-   *
-   * @param handler The handler to be called when the signal occurs. Copies
-   * will be made of the handler as required. The function signature of the
-   * handler must be:
-   * @code void handler(
-   *   const asio::error_code& error, // Result of operation.
-   *   int signal_number // Indicates which signal occurred.
-   * ); @endcode
-   * Regardless of whether the asynchronous operation completes immediately or
-   * not, the handler will not be invoked from within this function. Invocation
-   * of the handler will be performed in a manner equivalent to using
-   * asio::io_service::post().
-   */
-  template <typename SignalHandler>
-  ASIO_INITFN_RESULT_TYPE(SignalHandler,
-      void (asio::error_code, int))
-  async_wait(ASIO_MOVE_ARG(SignalHandler) handler)
-  {
-    // If you get an error on the following line it means that your handler does
-    // not meet the documented type requirements for a SignalHandler.
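-    // A conforming handler is callable as
-    // handler(const asio::error_code&, int signal_number).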
-    ASIO_SIGNAL_HANDLER_CHECK(SignalHandler, handler) type_check;
-
-    return this->service.async_wait(this->implementation,
-        ASIO_MOVE_CAST(SignalHandler)(handler));
-  }
-};
-
-} // namespace asio
-
-#include "asio/detail/pop_options.hpp"
-
-#endif // ASIO_BASIC_SIGNAL_SET_HPP
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_socket.hpp b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_socket.hpp
deleted file mode 100644
index be4fb3c2c3af7..0000000000000
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_socket.hpp
+++ /dev/null
@@ -1,1518 +0,0 @@
-//
-// basic_socket.hpp
-// ~~~~~~~~~~~~~~~~
-//
-// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
-//
-// Distributed under the Boost Software License, Version 1.0. (See accompanying
-// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-//
-
-#ifndef ASIO_BASIC_SOCKET_HPP
-#define ASIO_BASIC_SOCKET_HPP
-
-#if defined(_MSC_VER) && (_MSC_VER >= 1200)
-# pragma once
-#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
-
-#include "asio/detail/config.hpp"
-#include "asio/async_result.hpp"
-#include "asio/basic_io_object.hpp"
-#include "asio/detail/handler_type_requirements.hpp"
-#include "asio/detail/throw_error.hpp"
-#include "asio/detail/type_traits.hpp"
-#include "asio/error.hpp"
-#include "asio/socket_base.hpp"
-
-#include "asio/detail/push_options.hpp"
-
-namespace asio {
-
-/// Provides socket functionality.
-/**
- * The basic_socket class template provides functionality that is common to both
- * stream-oriented and datagram-oriented sockets.
- *
- * @par Thread Safety
- * @e Distinct @e objects: Safe.@n
- * @e Shared @e objects: Unsafe.
- */
-template <typename Protocol, typename SocketService>
-class basic_socket
-  : public basic_io_object<SocketService>,
-    public socket_base
-{
-public:
-  /// (Deprecated: Use native_handle_type.) The native representation of a
-  /// socket.
-  typedef typename SocketService::native_handle_type native_type;
-
-  /// The native representation of a socket.
-  typedef typename SocketService::native_handle_type native_handle_type;
-
-  /// The protocol type.
-  typedef Protocol protocol_type;
-
-  /// The endpoint type.
-  typedef typename Protocol::endpoint endpoint_type;
-
-  /// A basic_socket is always the lowest layer.
-  typedef basic_socket<Protocol, SocketService> lowest_layer_type;
-
-  /// Construct a basic_socket without opening it.
-  /**
-   * This constructor creates a socket without opening it.
-   *
-   * @param io_service The io_service object that the socket will use to
-   * dispatch handlers for any asynchronous operations performed on the socket.
-   */
-  explicit basic_socket(asio::io_service& io_service)
-    : basic_io_object<SocketService>(io_service)
-  {
-  }
-
-  /// Construct and open a basic_socket.
-  /**
-   * This constructor creates and opens a socket.
-   *
-   * @param io_service The io_service object that the socket will use to
-   * dispatch handlers for any asynchronous operations performed on the socket.
-   *
-   * @param protocol An object specifying protocol parameters to be used.
-   *
-   * @throws asio::system_error Thrown on failure.
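-   *
-   * @par Example
-   * A minimal sketch that constructs and opens an IPv4 TCP socket:
-   * @code
-   * asio::ip::tcp::socket socket(io_service, asio::ip::tcp::v4());
-   * @endcode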
-   */
-  basic_socket(asio::io_service& io_service,
-      const protocol_type& protocol)
-    : basic_io_object<SocketService>(io_service)
-  {
-    asio::error_code ec;
-    this->get_service().open(this->get_implementation(), protocol, ec);
-    asio::detail::throw_error(ec, "open");
-  }
-
-  /// Construct a basic_socket, opening it and binding it to the given local
-  /// endpoint.
-  /**
-   * This constructor creates a socket and automatically opens it bound to the
-   * specified endpoint on the local machine. The protocol used is the protocol
-   * associated with the given endpoint.
-   *
-   * @param io_service The io_service object that the socket will use to
-   * dispatch handlers for any asynchronous operations performed on the socket.
-   *
-   * @param endpoint An endpoint on the local machine to which the socket will
-   * be bound.
-   *
-   * @throws asio::system_error Thrown on failure.
-   */
-  basic_socket(asio::io_service& io_service,
-      const endpoint_type& endpoint)
-    : basic_io_object<SocketService>(io_service)
-  {
-    asio::error_code ec;
-    const protocol_type protocol = endpoint.protocol();
-    this->get_service().open(this->get_implementation(), protocol, ec);
-    asio::detail::throw_error(ec, "open");
-    this->get_service().bind(this->get_implementation(), endpoint, ec);
-    asio::detail::throw_error(ec, "bind");
-  }
-
-  /// Construct a basic_socket on an existing native socket.
-  /**
-   * This constructor creates a socket object to hold an existing native socket.
-   *
-   * @param io_service The io_service object that the socket will use to
-   * dispatch handlers for any asynchronous operations performed on the socket.
-   *
-   * @param protocol An object specifying protocol parameters to be used.
-   *
-   * @param native_socket A native socket.
-   *
-   * @throws asio::system_error Thrown on failure.
-   */
-  basic_socket(asio::io_service& io_service,
-      const protocol_type& protocol, const native_handle_type& native_socket)
-    : basic_io_object<SocketService>(io_service)
-  {
-    asio::error_code ec;
-    this->get_service().assign(this->get_implementation(),
-        protocol, native_socket, ec);
-    asio::detail::throw_error(ec, "assign");
-  }
-
-#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)
-  /// Move-construct a basic_socket from another.
-  /**
-   * This constructor moves a socket from one object to another.
-   *
-   * @param other The other basic_socket object from which the move will
-   * occur.
-   *
-   * @note Following the move, the moved-from object is in the same state as if
-   * constructed using the @c basic_socket(io_service&) constructor.
-   */
-  basic_socket(basic_socket&& other)
-    : basic_io_object<SocketService>(
-        ASIO_MOVE_CAST(basic_socket)(other))
-  {
-  }
-
-  /// Move-assign a basic_socket from another.
-  /**
-   * This assignment operator moves a socket from one object to another.
-   *
-   * @param other The other basic_socket object from which the move will
-   * occur.
-   *
-   * @note Following the move, the moved-from object is in the same state as if
-   * constructed using the @c basic_socket(io_service&) constructor.
-   */
-  basic_socket& operator=(basic_socket&& other)
-  {
-    basic_io_object<SocketService>::operator=(
-        ASIO_MOVE_CAST(basic_socket)(other));
-    return *this;
-  }
-
-  // All sockets have access to each other's implementations.
-  template <typename Protocol1, typename SocketService1>
-  friend class basic_socket;
-
-  /// Move-construct a basic_socket from a socket of another protocol type.
-  /**
-   * This constructor moves a socket from one object to another.
-   *
-   * @param other The other basic_socket object from which the move will
-   * occur.
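-   *
-   * @par Example
-   * A sketch of moving a TCP socket into a protocol-erased socket (assumes
-   * asio::generic::stream_protocol, whose protocol type is constructible
-   * from @c ip::tcp):
-   * @code
-   * asio::ip::tcp::socket tcp_socket(io_service);
-   * asio::generic::stream_protocol::socket any_socket(
-   *     std::move(tcp_socket));
-   * @endcode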
-   *
-   * @note Following the move, the moved-from object is in the same state as if
-   * constructed using the @c basic_socket(io_service&) constructor.
-   */
-  template <typename Protocol1, typename SocketService1>
-  basic_socket(basic_socket<Protocol1, SocketService1>&& other,
-      typename enable_if<is_convertible<Protocol1, Protocol>::value>::type* = 0)
-    : basic_io_object<SocketService>(other.get_io_service())
-  {
-    this->get_service().template converting_move_construct<Protocol1>(
-        this->get_implementation(), other.get_implementation());
-  }
-
-  /// Move-assign a basic_socket from a socket of another protocol type.
-  /**
-   * This assignment operator moves a socket from one object to another.
-   *
-   * @param other The other basic_socket object from which the move will
-   * occur.
-   *
-   * @note Following the move, the moved-from object is in the same state as if
-   * constructed using the @c basic_socket(io_service&) constructor.
-   */
-  template <typename Protocol1, typename SocketService1>
-  typename enable_if<is_convertible<Protocol1, Protocol>::value,
-      basic_socket>::type& operator=(
-        basic_socket<Protocol1, SocketService1>&& other)
-  {
-    basic_socket tmp(ASIO_MOVE_CAST2(basic_socket<
-          Protocol1, SocketService1>)(other));
-    basic_io_object<SocketService>::operator=(
-        ASIO_MOVE_CAST(basic_socket)(tmp));
-    return *this;
-  }
-#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)
-
-  /// Get a reference to the lowest layer.
-  /**
-   * This function returns a reference to the lowest layer in a stack of
-   * layers. Since a basic_socket cannot contain any further layers, it simply
-   * returns a reference to itself.
-   *
-   * @return A reference to the lowest layer in the stack of layers. Ownership
-   * is not transferred to the caller.
-   */
-  lowest_layer_type& lowest_layer()
-  {
-    return *this;
-  }
-
-  /// Get a const reference to the lowest layer.
-  /**
-   * This function returns a const reference to the lowest layer in a stack of
-   * layers. Since a basic_socket cannot contain any further layers, it simply
-   * returns a reference to itself.
-   *
-   * @return A const reference to the lowest layer in the stack of layers.
-   * Ownership is not transferred to the caller.
-   */
-  const lowest_layer_type& lowest_layer() const
-  {
-    return *this;
-  }
-
-  /// Open the socket using the specified protocol.
-  /**
-   * This function opens the socket so that it will use the specified protocol.
-   *
-   * @param protocol An object specifying protocol parameters to be used.
-   *
-   * @throws asio::system_error Thrown on failure.
-   *
-   * @par Example
-   * @code
-   * asio::ip::tcp::socket socket(io_service);
-   * socket.open(asio::ip::tcp::v4());
-   * @endcode
-   */
-  void open(const protocol_type& protocol = protocol_type())
-  {
-    asio::error_code ec;
-    this->get_service().open(this->get_implementation(), protocol, ec);
-    asio::detail::throw_error(ec, "open");
-  }
-
-  /// Open the socket using the specified protocol.
-  /**
-   * This function opens the socket so that it will use the specified protocol.
-   *
-   * @param protocol An object specifying which protocol is to be used.
-   *
-   * @param ec Set to indicate what error occurred, if any.
-   *
-   * @par Example
-   * @code
-   * asio::ip::tcp::socket socket(io_service);
-   * asio::error_code ec;
-   * socket.open(asio::ip::tcp::v4(), ec);
-   * if (ec)
-   * {
-   *   // An error occurred.
-   * }
-   * @endcode
-   */
-  asio::error_code open(const protocol_type& protocol,
-      asio::error_code& ec)
-  {
-    return this->get_service().open(this->get_implementation(), protocol, ec);
-  }
-
-  /// Assign an existing native socket to the socket.
-  /*
-   * This function opens the socket to hold an existing native socket.
-   *
-   * @param protocol An object specifying which protocol is to be used.
-   *
-   * @param native_socket A native socket.
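-   *
-   * @par Example
-   * A sketch of adopting a descriptor obtained elsewhere (the name
-   * @c native_fd is a placeholder):
-   * @code
-   * asio::ip::tcp::socket socket(io_service);
-   * socket.assign(asio::ip::tcp::v4(), native_fd);
-   * @endcode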
- * - * @throws asio::system_error Thrown on failure. - */ - void assign(const protocol_type& protocol, - const native_handle_type& native_socket) - { - asio::error_code ec; - this->get_service().assign(this->get_implementation(), - protocol, native_socket, ec); - asio::detail::throw_error(ec, "assign"); - } - - /// Assign an existing native socket to the socket. - /* - * This function opens the socket to hold an existing native socket. - * - * @param protocol An object specifying which protocol is to be used. - * - * @param native_socket A native socket. - * - * @param ec Set to indicate what error occurred, if any. - */ - asio::error_code assign(const protocol_type& protocol, - const native_handle_type& native_socket, asio::error_code& ec) - { - return this->get_service().assign(this->get_implementation(), - protocol, native_socket, ec); - } - - /// Determine whether the socket is open. - bool is_open() const - { - return this->get_service().is_open(this->get_implementation()); - } - - /// Close the socket. - /** - * This function is used to close the socket. Any asynchronous send, receive - * or connect operations will be cancelled immediately, and will complete - * with the asio::error::operation_aborted error. - * - * @throws asio::system_error Thrown on failure. Note that, even if - * the function indicates an error, the underlying descriptor is closed. - * - * @note For portable behaviour with respect to graceful closure of a - * connected socket, call shutdown() before closing the socket. - */ - void close() - { - asio::error_code ec; - this->get_service().close(this->get_implementation(), ec); - asio::detail::throw_error(ec, "close"); - } - - /// Close the socket. - /** - * This function is used to close the socket. Any asynchronous send, receive - * or connect operations will be cancelled immediately, and will complete - * with the asio::error::operation_aborted error. - * - * @param ec Set to indicate what error occurred, if any. Note that, even if - * the function indicates an error, the underlying descriptor is closed. - * - * @par Example - * @code - * asio::ip::tcp::socket socket(io_service); - * ... - * asio::error_code ec; - * socket.close(ec); - * if (ec) - * { - * // An error occurred. - * } - * @endcode - * - * @note For portable behaviour with respect to graceful closure of a - * connected socket, call shutdown() before closing the socket. - */ - asio::error_code close(asio::error_code& ec) - { - return this->get_service().close(this->get_implementation(), ec); - } - - /// (Deprecated: Use native_handle().) Get the native socket representation. - /** - * This function may be used to obtain the underlying representation of the - * socket. This is intended to allow access to native socket functionality - * that is not otherwise provided. - */ - native_type native() - { - return this->get_service().native_handle(this->get_implementation()); - } - - /// Get the native socket representation. - /** - * This function may be used to obtain the underlying representation of the - * socket. This is intended to allow access to native socket functionality - * that is not otherwise provided. - */ - native_handle_type native_handle() - { - return this->get_service().native_handle(this->get_implementation()); - } - - /// Cancel all asynchronous operations associated with the socket. 
- /** - * This function causes all outstanding asynchronous connect, send and receive - * operations to finish immediately, and the handlers for cancelled operations - * will be passed the asio::error::operation_aborted error. - * - * @throws asio::system_error Thrown on failure. - * - * @note Calls to cancel() will always fail with - * asio::error::operation_not_supported when run on Windows XP, Windows - * Server 2003, and earlier versions of Windows, unless - * ASIO_ENABLE_CANCELIO is defined. However, the CancelIo function has - * two issues that should be considered before enabling its use: - * - * @li It will only cancel asynchronous operations that were initiated in the - * current thread. - * - * @li It can appear to complete without error, but the request to cancel the - * unfinished operations may be silently ignored by the operating system. - * Whether it works or not seems to depend on the drivers that are installed. - * - * For portable cancellation, consider using one of the following - * alternatives: - * - * @li Disable asio's I/O completion port backend by defining - * ASIO_DISABLE_IOCP. - * - * @li Use the close() function to simultaneously cancel the outstanding - * operations and close the socket. - * - * When running on Windows Vista, Windows Server 2008, and later, the - * CancelIoEx function is always used. This function does not have the - * problems described above. - */ -#if defined(ASIO_MSVC) && (ASIO_MSVC >= 1400) \ - && (!defined(_WIN32_WINNT) || _WIN32_WINNT < 0x0600) \ - && !defined(ASIO_ENABLE_CANCELIO) - __declspec(deprecated("By default, this function always fails with " - "operation_not_supported when used on Windows XP, Windows Server 2003, " - "or earlier. Consult documentation for details.")) -#endif - void cancel() - { - asio::error_code ec; - this->get_service().cancel(this->get_implementation(), ec); - asio::detail::throw_error(ec, "cancel"); - } - - /// Cancel all asynchronous operations associated with the socket. - /** - * This function causes all outstanding asynchronous connect, send and receive - * operations to finish immediately, and the handlers for cancelled operations - * will be passed the asio::error::operation_aborted error. - * - * @param ec Set to indicate what error occurred, if any. - * - * @note Calls to cancel() will always fail with - * asio::error::operation_not_supported when run on Windows XP, Windows - * Server 2003, and earlier versions of Windows, unless - * ASIO_ENABLE_CANCELIO is defined. However, the CancelIo function has - * two issues that should be considered before enabling its use: - * - * @li It will only cancel asynchronous operations that were initiated in the - * current thread. - * - * @li It can appear to complete without error, but the request to cancel the - * unfinished operations may be silently ignored by the operating system. - * Whether it works or not seems to depend on the drivers that are installed. - * - * For portable cancellation, consider using one of the following - * alternatives: - * - * @li Disable asio's I/O completion port backend by defining - * ASIO_DISABLE_IOCP. - * - * @li Use the close() function to simultaneously cancel the outstanding - * operations and close the socket. - * - * When running on Windows Vista, Windows Server 2008, and later, the - * CancelIoEx function is always used. This function does not have the - * problems described above. 
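-   *
-   * @par Example
-   * A sketch mirroring the close() example above:
-   * @code
-   * asio::ip::tcp::socket socket(io_service);
-   * ...
-   * asio::error_code ec;
-   * socket.cancel(ec);
-   * if (ec)
-   * {
-   *   // An error occurred.
-   * }
-   * @endcode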
- */ -#if defined(ASIO_MSVC) && (ASIO_MSVC >= 1400) \ - && (!defined(_WIN32_WINNT) || _WIN32_WINNT < 0x0600) \ - && !defined(ASIO_ENABLE_CANCELIO) - __declspec(deprecated("By default, this function always fails with " - "operation_not_supported when used on Windows XP, Windows Server 2003, " - "or earlier. Consult documentation for details.")) -#endif - asio::error_code cancel(asio::error_code& ec) - { - return this->get_service().cancel(this->get_implementation(), ec); - } - - /// Determine whether the socket is at the out-of-band data mark. - /** - * This function is used to check whether the socket input is currently - * positioned at the out-of-band data mark. - * - * @return A bool indicating whether the socket is at the out-of-band data - * mark. - * - * @throws asio::system_error Thrown on failure. - */ - bool at_mark() const - { - asio::error_code ec; - bool b = this->get_service().at_mark(this->get_implementation(), ec); - asio::detail::throw_error(ec, "at_mark"); - return b; - } - - /// Determine whether the socket is at the out-of-band data mark. - /** - * This function is used to check whether the socket input is currently - * positioned at the out-of-band data mark. - * - * @param ec Set to indicate what error occurred, if any. - * - * @return A bool indicating whether the socket is at the out-of-band data - * mark. - */ - bool at_mark(asio::error_code& ec) const - { - return this->get_service().at_mark(this->get_implementation(), ec); - } - - /// Determine the number of bytes available for reading. - /** - * This function is used to determine the number of bytes that may be read - * without blocking. - * - * @return The number of bytes that may be read without blocking, or 0 if an - * error occurs. - * - * @throws asio::system_error Thrown on failure. - */ - std::size_t available() const - { - asio::error_code ec; - std::size_t s = this->get_service().available( - this->get_implementation(), ec); - asio::detail::throw_error(ec, "available"); - return s; - } - - /// Determine the number of bytes available for reading. - /** - * This function is used to determine the number of bytes that may be read - * without blocking. - * - * @param ec Set to indicate what error occurred, if any. - * - * @return The number of bytes that may be read without blocking, or 0 if an - * error occurs. - */ - std::size_t available(asio::error_code& ec) const - { - return this->get_service().available(this->get_implementation(), ec); - } - - /// Bind the socket to the given local endpoint. - /** - * This function binds the socket to the specified endpoint on the local - * machine. - * - * @param endpoint An endpoint on the local machine to which the socket will - * be bound. - * - * @throws asio::system_error Thrown on failure. - * - * @par Example - * @code - * asio::ip::tcp::socket socket(io_service); - * socket.open(asio::ip::tcp::v4()); - * socket.bind(asio::ip::tcp::endpoint( - * asio::ip::tcp::v4(), 12345)); - * @endcode - */ - void bind(const endpoint_type& endpoint) - { - asio::error_code ec; - this->get_service().bind(this->get_implementation(), endpoint, ec); - asio::detail::throw_error(ec, "bind"); - } - - /// Bind the socket to the given local endpoint. - /** - * This function binds the socket to the specified endpoint on the local - * machine. - * - * @param endpoint An endpoint on the local machine to which the socket will - * be bound. - * - * @param ec Set to indicate what error occurred, if any. 
- * - * @par Example - * @code - * asio::ip::tcp::socket socket(io_service); - * socket.open(asio::ip::tcp::v4()); - * asio::error_code ec; - * socket.bind(asio::ip::tcp::endpoint( - * asio::ip::tcp::v4(), 12345), ec); - * if (ec) - * { - * // An error occurred. - * } - * @endcode - */ - asio::error_code bind(const endpoint_type& endpoint, - asio::error_code& ec) - { - return this->get_service().bind(this->get_implementation(), endpoint, ec); - } - - /// Connect the socket to the specified endpoint. - /** - * This function is used to connect a socket to the specified remote endpoint. - * The function call will block until the connection is successfully made or - * an error occurs. - * - * The socket is automatically opened if it is not already open. If the - * connect fails, and the socket was automatically opened, the socket is - * not returned to the closed state. - * - * @param peer_endpoint The remote endpoint to which the socket will be - * connected. - * - * @throws asio::system_error Thrown on failure. - * - * @par Example - * @code - * asio::ip::tcp::socket socket(io_service); - * asio::ip::tcp::endpoint endpoint( - * asio::ip::address::from_string("1.2.3.4"), 12345); - * socket.connect(endpoint); - * @endcode - */ - void connect(const endpoint_type& peer_endpoint) - { - asio::error_code ec; - if (!is_open()) - { - this->get_service().open(this->get_implementation(), - peer_endpoint.protocol(), ec); - asio::detail::throw_error(ec, "connect"); - } - this->get_service().connect(this->get_implementation(), peer_endpoint, ec); - asio::detail::throw_error(ec, "connect"); - } - - /// Connect the socket to the specified endpoint. - /** - * This function is used to connect a socket to the specified remote endpoint. - * The function call will block until the connection is successfully made or - * an error occurs. - * - * The socket is automatically opened if it is not already open. If the - * connect fails, and the socket was automatically opened, the socket is - * not returned to the closed state. - * - * @param peer_endpoint The remote endpoint to which the socket will be - * connected. - * - * @param ec Set to indicate what error occurred, if any. - * - * @par Example - * @code - * asio::ip::tcp::socket socket(io_service); - * asio::ip::tcp::endpoint endpoint( - * asio::ip::address::from_string("1.2.3.4"), 12345); - * asio::error_code ec; - * socket.connect(endpoint, ec); - * if (ec) - * { - * // An error occurred. - * } - * @endcode - */ - asio::error_code connect(const endpoint_type& peer_endpoint, - asio::error_code& ec) - { - if (!is_open()) - { - if (this->get_service().open(this->get_implementation(), - peer_endpoint.protocol(), ec)) - { - return ec; - } - } - - return this->get_service().connect( - this->get_implementation(), peer_endpoint, ec); - } - - /// Start an asynchronous connect. - /** - * This function is used to asynchronously connect a socket to the specified - * remote endpoint. The function call always returns immediately. - * - * The socket is automatically opened if it is not already open. If the - * connect fails, and the socket was automatically opened, the socket is - * not returned to the closed state. - * - * @param peer_endpoint The remote endpoint to which the socket will be - * connected. Copies will be made of the endpoint object as required. - * - * @param handler The handler to be called when the connection operation - * completes. Copies will be made of the handler as required. 
The function - * signature of the handler must be: - * @code void handler( - * const asio::error_code& error // Result of operation - * ); @endcode - * Regardless of whether the asynchronous operation completes immediately or - * not, the handler will not be invoked from within this function. Invocation - * of the handler will be performed in a manner equivalent to using - * asio::io_service::post(). - * - * @par Example - * @code - * void connect_handler(const asio::error_code& error) - * { - * if (!error) - * { - * // Connect succeeded. - * } - * } - * - * ... - * - * asio::ip::tcp::socket socket(io_service); - * asio::ip::tcp::endpoint endpoint( - * asio::ip::address::from_string("1.2.3.4"), 12345); - * socket.async_connect(endpoint, connect_handler); - * @endcode - */ - template - ASIO_INITFN_RESULT_TYPE(ConnectHandler, - void (asio::error_code)) - async_connect(const endpoint_type& peer_endpoint, - ASIO_MOVE_ARG(ConnectHandler) handler) - { - // If you get an error on the following line it means that your handler does - // not meet the documented type requirements for a ConnectHandler. - ASIO_CONNECT_HANDLER_CHECK(ConnectHandler, handler) type_check; - - if (!is_open()) - { - asio::error_code ec; - const protocol_type protocol = peer_endpoint.protocol(); - if (this->get_service().open(this->get_implementation(), protocol, ec)) - { - detail::async_result_init< - ConnectHandler, void (asio::error_code)> init( - ASIO_MOVE_CAST(ConnectHandler)(handler)); - - this->get_io_service().post( - asio::detail::bind_handler( - ASIO_MOVE_CAST(ASIO_HANDLER_TYPE( - ConnectHandler, void (asio::error_code)))( - init.handler), ec)); - - return init.result.get(); - } - } - - return this->get_service().async_connect(this->get_implementation(), - peer_endpoint, ASIO_MOVE_CAST(ConnectHandler)(handler)); - } - - /// Set an option on the socket. - /** - * This function is used to set an option on the socket. - * - * @param option The new option value to be set on the socket. - * - * @throws asio::system_error Thrown on failure. - * - * @sa SettableSocketOption @n - * asio::socket_base::broadcast @n - * asio::socket_base::do_not_route @n - * asio::socket_base::keep_alive @n - * asio::socket_base::linger @n - * asio::socket_base::receive_buffer_size @n - * asio::socket_base::receive_low_watermark @n - * asio::socket_base::reuse_address @n - * asio::socket_base::send_buffer_size @n - * asio::socket_base::send_low_watermark @n - * asio::ip::multicast::join_group @n - * asio::ip::multicast::leave_group @n - * asio::ip::multicast::enable_loopback @n - * asio::ip::multicast::outbound_interface @n - * asio::ip::multicast::hops @n - * asio::ip::tcp::no_delay - * - * @par Example - * Setting the IPPROTO_TCP/TCP_NODELAY option: - * @code - * asio::ip::tcp::socket socket(io_service); - * ... - * asio::ip::tcp::no_delay option(true); - * socket.set_option(option); - * @endcode - */ - template - void set_option(const SettableSocketOption& option) - { - asio::error_code ec; - this->get_service().set_option(this->get_implementation(), option, ec); - asio::detail::throw_error(ec, "set_option"); - } - - /// Set an option on the socket. - /** - * This function is used to set an option on the socket. - * - * @param option The new option value to be set on the socket. - * - * @param ec Set to indicate what error occurred, if any. 
- * - * @sa SettableSocketOption @n - * asio::socket_base::broadcast @n - * asio::socket_base::do_not_route @n - * asio::socket_base::keep_alive @n - * asio::socket_base::linger @n - * asio::socket_base::receive_buffer_size @n - * asio::socket_base::receive_low_watermark @n - * asio::socket_base::reuse_address @n - * asio::socket_base::send_buffer_size @n - * asio::socket_base::send_low_watermark @n - * asio::ip::multicast::join_group @n - * asio::ip::multicast::leave_group @n - * asio::ip::multicast::enable_loopback @n - * asio::ip::multicast::outbound_interface @n - * asio::ip::multicast::hops @n - * asio::ip::tcp::no_delay - * - * @par Example - * Setting the IPPROTO_TCP/TCP_NODELAY option: - * @code - * asio::ip::tcp::socket socket(io_service); - * ... - * asio::ip::tcp::no_delay option(true); - * asio::error_code ec; - * socket.set_option(option, ec); - * if (ec) - * { - * // An error occurred. - * } - * @endcode - */ - template - asio::error_code set_option(const SettableSocketOption& option, - asio::error_code& ec) - { - return this->get_service().set_option( - this->get_implementation(), option, ec); - } - - /// Get an option from the socket. - /** - * This function is used to get the current value of an option on the socket. - * - * @param option The option value to be obtained from the socket. - * - * @throws asio::system_error Thrown on failure. - * - * @sa GettableSocketOption @n - * asio::socket_base::broadcast @n - * asio::socket_base::do_not_route @n - * asio::socket_base::keep_alive @n - * asio::socket_base::linger @n - * asio::socket_base::receive_buffer_size @n - * asio::socket_base::receive_low_watermark @n - * asio::socket_base::reuse_address @n - * asio::socket_base::send_buffer_size @n - * asio::socket_base::send_low_watermark @n - * asio::ip::multicast::join_group @n - * asio::ip::multicast::leave_group @n - * asio::ip::multicast::enable_loopback @n - * asio::ip::multicast::outbound_interface @n - * asio::ip::multicast::hops @n - * asio::ip::tcp::no_delay - * - * @par Example - * Getting the value of the SOL_SOCKET/SO_KEEPALIVE option: - * @code - * asio::ip::tcp::socket socket(io_service); - * ... - * asio::ip::tcp::socket::keep_alive option; - * socket.get_option(option); - * bool is_set = option.value(); - * @endcode - */ - template - void get_option(GettableSocketOption& option) const - { - asio::error_code ec; - this->get_service().get_option(this->get_implementation(), option, ec); - asio::detail::throw_error(ec, "get_option"); - } - - /// Get an option from the socket. - /** - * This function is used to get the current value of an option on the socket. - * - * @param option The option value to be obtained from the socket. - * - * @param ec Set to indicate what error occurred, if any. - * - * @sa GettableSocketOption @n - * asio::socket_base::broadcast @n - * asio::socket_base::do_not_route @n - * asio::socket_base::keep_alive @n - * asio::socket_base::linger @n - * asio::socket_base::receive_buffer_size @n - * asio::socket_base::receive_low_watermark @n - * asio::socket_base::reuse_address @n - * asio::socket_base::send_buffer_size @n - * asio::socket_base::send_low_watermark @n - * asio::ip::multicast::join_group @n - * asio::ip::multicast::leave_group @n - * asio::ip::multicast::enable_loopback @n - * asio::ip::multicast::outbound_interface @n - * asio::ip::multicast::hops @n - * asio::ip::tcp::no_delay - * - * @par Example - * Getting the value of the SOL_SOCKET/SO_KEEPALIVE option: - * @code - * asio::ip::tcp::socket socket(io_service); - * ... 
- * asio::ip::tcp::socket::keep_alive option; - * asio::error_code ec; - * socket.get_option(option, ec); - * if (ec) - * { - * // An error occurred. - * } - * bool is_set = option.value(); - * @endcode - */ - template - asio::error_code get_option(GettableSocketOption& option, - asio::error_code& ec) const - { - return this->get_service().get_option( - this->get_implementation(), option, ec); - } - - /// Perform an IO control command on the socket. - /** - * This function is used to execute an IO control command on the socket. - * - * @param command The IO control command to be performed on the socket. - * - * @throws asio::system_error Thrown on failure. - * - * @sa IoControlCommand @n - * asio::socket_base::bytes_readable @n - * asio::socket_base::non_blocking_io - * - * @par Example - * Getting the number of bytes ready to read: - * @code - * asio::ip::tcp::socket socket(io_service); - * ... - * asio::ip::tcp::socket::bytes_readable command; - * socket.io_control(command); - * std::size_t bytes_readable = command.get(); - * @endcode - */ - template - void io_control(IoControlCommand& command) - { - asio::error_code ec; - this->get_service().io_control(this->get_implementation(), command, ec); - asio::detail::throw_error(ec, "io_control"); - } - - /// Perform an IO control command on the socket. - /** - * This function is used to execute an IO control command on the socket. - * - * @param command The IO control command to be performed on the socket. - * - * @param ec Set to indicate what error occurred, if any. - * - * @sa IoControlCommand @n - * asio::socket_base::bytes_readable @n - * asio::socket_base::non_blocking_io - * - * @par Example - * Getting the number of bytes ready to read: - * @code - * asio::ip::tcp::socket socket(io_service); - * ... - * asio::ip::tcp::socket::bytes_readable command; - * asio::error_code ec; - * socket.io_control(command, ec); - * if (ec) - * { - * // An error occurred. - * } - * std::size_t bytes_readable = command.get(); - * @endcode - */ - template - asio::error_code io_control(IoControlCommand& command, - asio::error_code& ec) - { - return this->get_service().io_control( - this->get_implementation(), command, ec); - } - - /// Gets the non-blocking mode of the socket. - /** - * @returns @c true if the socket's synchronous operations will fail with - * asio::error::would_block if they are unable to perform the requested - * operation immediately. If @c false, synchronous operations will block - * until complete. - * - * @note The non-blocking mode has no effect on the behaviour of asynchronous - * operations. Asynchronous operations will never fail with the error - * asio::error::would_block. - */ - bool non_blocking() const - { - return this->get_service().non_blocking(this->get_implementation()); - } - - /// Sets the non-blocking mode of the socket. - /** - * @param mode If @c true, the socket's synchronous operations will fail with - * asio::error::would_block if they are unable to perform the requested - * operation immediately. If @c false, synchronous operations will block - * until complete. - * - * @throws asio::system_error Thrown on failure. - * - * @note The non-blocking mode has no effect on the behaviour of asynchronous - * operations. Asynchronous operations will never fail with the error - * asio::error::would_block. 
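-   *
-   * @par Example
-   * A sketch of enabling non-blocking mode on a stream socket and handling
-   * would_block (@c buf is a placeholder for a caller-supplied buffer):
-   * @code
-   * socket.non_blocking(true);
-   * asio::error_code ec;
-   * std::size_t n = socket.read_some(asio::buffer(buf), ec);
-   * if (ec == asio::error::would_block)
-   * {
-   *   // The operation would have blocked; try again later.
-   * }
-   * @endcode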
- */ - void non_blocking(bool mode) - { - asio::error_code ec; - this->get_service().non_blocking(this->get_implementation(), mode, ec); - asio::detail::throw_error(ec, "non_blocking"); - } - - /// Sets the non-blocking mode of the socket. - /** - * @param mode If @c true, the socket's synchronous operations will fail with - * asio::error::would_block if they are unable to perform the requested - * operation immediately. If @c false, synchronous operations will block - * until complete. - * - * @param ec Set to indicate what error occurred, if any. - * - * @note The non-blocking mode has no effect on the behaviour of asynchronous - * operations. Asynchronous operations will never fail with the error - * asio::error::would_block. - */ - asio::error_code non_blocking( - bool mode, asio::error_code& ec) - { - return this->get_service().non_blocking( - this->get_implementation(), mode, ec); - } - - /// Gets the non-blocking mode of the native socket implementation. - /** - * This function is used to retrieve the non-blocking mode of the underlying - * native socket. This mode has no effect on the behaviour of the socket - * object's synchronous operations. - * - * @returns @c true if the underlying socket is in non-blocking mode and - * direct system calls may fail with asio::error::would_block (or the - * equivalent system error). - * - * @note The current non-blocking mode is cached by the socket object. - * Consequently, the return value may be incorrect if the non-blocking mode - * was set directly on the native socket. - * - * @par Example - * This function is intended to allow the encapsulation of arbitrary - * non-blocking system calls as asynchronous operations, in a way that is - * transparent to the user of the socket object. The following example - * illustrates how Linux's @c sendfile system call might be encapsulated: - * @code template - * struct sendfile_op - * { - * tcp::socket& sock_; - * int fd_; - * Handler handler_; - * off_t offset_; - * std::size_t total_bytes_transferred_; - * - * // Function call operator meeting WriteHandler requirements. - * // Used as the handler for the async_write_some operation. - * void operator()(asio::error_code ec, std::size_t) - * { - * // Put the underlying socket into non-blocking mode. - * if (!ec) - * if (!sock_.native_non_blocking()) - * sock_.native_non_blocking(true, ec); - * - * if (!ec) - * { - * for (;;) - * { - * // Try the system call. - * errno = 0; - * int n = ::sendfile(sock_.native_handle(), fd_, &offset_, 65536); - * ec = asio::error_code(n < 0 ? errno : 0, - * asio::error::get_system_category()); - * total_bytes_transferred_ += ec ? 0 : n; - * - * // Retry operation immediately if interrupted by signal. - * if (ec == asio::error::interrupted) - * continue; - * - * // Check if we need to run the operation again. - * if (ec == asio::error::would_block - * || ec == asio::error::try_again) - * { - * // We have to wait for the socket to become ready again. - * sock_.async_write_some(asio::null_buffers(), *this); - * return; - * } - * - * if (ec || n == 0) - * { - * // An error occurred, or we have reached the end of the file. - * // Either way we must exit the loop so we can call the handler. - * break; - * } - * - * // Loop around to try calling sendfile again. - * } - * } - * - * // Pass result back to user's handler. 
- * handler_(ec, total_bytes_transferred_); - * } - * }; - * - * template - * void async_sendfile(tcp::socket& sock, int fd, Handler h) - * { - * sendfile_op op = { sock, fd, h, 0, 0 }; - * sock.async_write_some(asio::null_buffers(), op); - * } @endcode - */ - bool native_non_blocking() const - { - return this->get_service().native_non_blocking(this->get_implementation()); - } - - /// Sets the non-blocking mode of the native socket implementation. - /** - * This function is used to modify the non-blocking mode of the underlying - * native socket. It has no effect on the behaviour of the socket object's - * synchronous operations. - * - * @param mode If @c true, the underlying socket is put into non-blocking - * mode and direct system calls may fail with asio::error::would_block - * (or the equivalent system error). - * - * @throws asio::system_error Thrown on failure. If the @c mode is - * @c false, but the current value of @c non_blocking() is @c true, this - * function fails with asio::error::invalid_argument, as the - * combination does not make sense. - * - * @par Example - * This function is intended to allow the encapsulation of arbitrary - * non-blocking system calls as asynchronous operations, in a way that is - * transparent to the user of the socket object. The following example - * illustrates how Linux's @c sendfile system call might be encapsulated: - * @code template - * struct sendfile_op - * { - * tcp::socket& sock_; - * int fd_; - * Handler handler_; - * off_t offset_; - * std::size_t total_bytes_transferred_; - * - * // Function call operator meeting WriteHandler requirements. - * // Used as the handler for the async_write_some operation. - * void operator()(asio::error_code ec, std::size_t) - * { - * // Put the underlying socket into non-blocking mode. - * if (!ec) - * if (!sock_.native_non_blocking()) - * sock_.native_non_blocking(true, ec); - * - * if (!ec) - * { - * for (;;) - * { - * // Try the system call. - * errno = 0; - * int n = ::sendfile(sock_.native_handle(), fd_, &offset_, 65536); - * ec = asio::error_code(n < 0 ? errno : 0, - * asio::error::get_system_category()); - * total_bytes_transferred_ += ec ? 0 : n; - * - * // Retry operation immediately if interrupted by signal. - * if (ec == asio::error::interrupted) - * continue; - * - * // Check if we need to run the operation again. - * if (ec == asio::error::would_block - * || ec == asio::error::try_again) - * { - * // We have to wait for the socket to become ready again. - * sock_.async_write_some(asio::null_buffers(), *this); - * return; - * } - * - * if (ec || n == 0) - * { - * // An error occurred, or we have reached the end of the file. - * // Either way we must exit the loop so we can call the handler. - * break; - * } - * - * // Loop around to try calling sendfile again. - * } - * } - * - * // Pass result back to user's handler. - * handler_(ec, total_bytes_transferred_); - * } - * }; - * - * template - * void async_sendfile(tcp::socket& sock, int fd, Handler h) - * { - * sendfile_op op = { sock, fd, h, 0, 0 }; - * sock.async_write_some(asio::null_buffers(), op); - * } @endcode - */ - void native_non_blocking(bool mode) - { - asio::error_code ec; - this->get_service().native_non_blocking( - this->get_implementation(), mode, ec); - asio::detail::throw_error(ec, "native_non_blocking"); - } - - /// Sets the non-blocking mode of the native socket implementation. - /** - * This function is used to modify the non-blocking mode of the underlying - * native socket. 
It has no effect on the behaviour of the socket object's - * synchronous operations. - * - * @param mode If @c true, the underlying socket is put into non-blocking - * mode and direct system calls may fail with asio::error::would_block - * (or the equivalent system error). - * - * @param ec Set to indicate what error occurred, if any. If the @c mode is - * @c false, but the current value of @c non_blocking() is @c true, this - * function fails with asio::error::invalid_argument, as the - * combination does not make sense. - * - * @par Example - * This function is intended to allow the encapsulation of arbitrary - * non-blocking system calls as asynchronous operations, in a way that is - * transparent to the user of the socket object. The following example - * illustrates how Linux's @c sendfile system call might be encapsulated: - * @code template - * struct sendfile_op - * { - * tcp::socket& sock_; - * int fd_; - * Handler handler_; - * off_t offset_; - * std::size_t total_bytes_transferred_; - * - * // Function call operator meeting WriteHandler requirements. - * // Used as the handler for the async_write_some operation. - * void operator()(asio::error_code ec, std::size_t) - * { - * // Put the underlying socket into non-blocking mode. - * if (!ec) - * if (!sock_.native_non_blocking()) - * sock_.native_non_blocking(true, ec); - * - * if (!ec) - * { - * for (;;) - * { - * // Try the system call. - * errno = 0; - * int n = ::sendfile(sock_.native_handle(), fd_, &offset_, 65536); - * ec = asio::error_code(n < 0 ? errno : 0, - * asio::error::get_system_category()); - * total_bytes_transferred_ += ec ? 0 : n; - * - * // Retry operation immediately if interrupted by signal. - * if (ec == asio::error::interrupted) - * continue; - * - * // Check if we need to run the operation again. - * if (ec == asio::error::would_block - * || ec == asio::error::try_again) - * { - * // We have to wait for the socket to become ready again. - * sock_.async_write_some(asio::null_buffers(), *this); - * return; - * } - * - * if (ec || n == 0) - * { - * // An error occurred, or we have reached the end of the file. - * // Either way we must exit the loop so we can call the handler. - * break; - * } - * - * // Loop around to try calling sendfile again. - * } - * } - * - * // Pass result back to user's handler. - * handler_(ec, total_bytes_transferred_); - * } - * }; - * - * template - * void async_sendfile(tcp::socket& sock, int fd, Handler h) - * { - * sendfile_op op = { sock, fd, h, 0, 0 }; - * sock.async_write_some(asio::null_buffers(), op); - * } @endcode - */ - asio::error_code native_non_blocking( - bool mode, asio::error_code& ec) - { - return this->get_service().native_non_blocking( - this->get_implementation(), mode, ec); - } - - /// Get the local endpoint of the socket. - /** - * This function is used to obtain the locally bound endpoint of the socket. - * - * @returns An object that represents the local endpoint of the socket. - * - * @throws asio::system_error Thrown on failure. - * - * @par Example - * @code - * asio::ip::tcp::socket socket(io_service); - * ... - * asio::ip::tcp::endpoint endpoint = socket.local_endpoint(); - * @endcode - */ - endpoint_type local_endpoint() const - { - asio::error_code ec; - endpoint_type ep = this->get_service().local_endpoint( - this->get_implementation(), ec); - asio::detail::throw_error(ec, "local_endpoint"); - return ep; - } - - /// Get the local endpoint of the socket. - /** - * This function is used to obtain the locally bound endpoint of the socket. 
-   *
-   * @param ec Set to indicate what error occurred, if any.
-   *
-   * @returns An object that represents the local endpoint of the socket.
-   * Returns a default-constructed endpoint object if an error occurred.
-   *
-   * @par Example
-   * @code
-   * asio::ip::tcp::socket socket(io_service);
-   * ...
-   * asio::error_code ec;
-   * asio::ip::tcp::endpoint endpoint = socket.local_endpoint(ec);
-   * if (ec)
-   * {
-   *   // An error occurred.
-   * }
-   * @endcode
-   */
-  endpoint_type local_endpoint(asio::error_code& ec) const
-  {
-    return this->get_service().local_endpoint(this->get_implementation(), ec);
-  }
-
-  /// Get the remote endpoint of the socket.
-  /**
-   * This function is used to obtain the remote endpoint of the socket.
-   *
-   * @returns An object that represents the remote endpoint of the socket.
-   *
-   * @throws asio::system_error Thrown on failure.
-   *
-   * @par Example
-   * @code
-   * asio::ip::tcp::socket socket(io_service);
-   * ...
-   * asio::ip::tcp::endpoint endpoint = socket.remote_endpoint();
-   * @endcode
-   */
-  endpoint_type remote_endpoint() const
-  {
-    asio::error_code ec;
-    endpoint_type ep = this->get_service().remote_endpoint(
-        this->get_implementation(), ec);
-    asio::detail::throw_error(ec, "remote_endpoint");
-    return ep;
-  }
-
-  /// Get the remote endpoint of the socket.
-  /**
-   * This function is used to obtain the remote endpoint of the socket.
-   *
-   * @param ec Set to indicate what error occurred, if any.
-   *
-   * @returns An object that represents the remote endpoint of the socket.
-   * Returns a default-constructed endpoint object if an error occurred.
-   *
-   * @par Example
-   * @code
-   * asio::ip::tcp::socket socket(io_service);
-   * ...
-   * asio::error_code ec;
-   * asio::ip::tcp::endpoint endpoint = socket.remote_endpoint(ec);
-   * if (ec)
-   * {
-   *   // An error occurred.
-   * }
-   * @endcode
-   */
-  endpoint_type remote_endpoint(asio::error_code& ec) const
-  {
-    return this->get_service().remote_endpoint(this->get_implementation(), ec);
-  }
-
-  /// Disable sends or receives on the socket.
-  /**
-   * This function is used to disable send operations, receive operations, or
-   * both.
-   *
-   * @param what Determines what types of operation will no longer be allowed.
-   *
-   * @throws asio::system_error Thrown on failure.
-   *
-   * @par Example
-   * Shutting down the send side of the socket:
-   * @code
-   * asio::ip::tcp::socket socket(io_service);
-   * ...
-   * socket.shutdown(asio::ip::tcp::socket::shutdown_send);
-   * @endcode
-   */
-  void shutdown(shutdown_type what)
-  {
-    asio::error_code ec;
-    this->get_service().shutdown(this->get_implementation(), what, ec);
-    asio::detail::throw_error(ec, "shutdown");
-  }
-
-  /// Disable sends or receives on the socket.
-  /**
-   * This function is used to disable send operations, receive operations, or
-   * both.
-   *
-   * @param what Determines what types of operation will no longer be allowed.
-   *
-   * @param ec Set to indicate what error occurred, if any.
-   *
-   * @par Example
-   * Shutting down the send side of the socket:
-   * @code
-   * asio::ip::tcp::socket socket(io_service);
-   * ...
-   * asio::error_code ec;
-   * socket.shutdown(asio::ip::tcp::socket::shutdown_send, ec);
-   * if (ec)
-   * {
-   *   // An error occurred.
-   * }
-   * @endcode
-   */
-  asio::error_code shutdown(shutdown_type what,
-      asio::error_code& ec)
-  {
-    return this->get_service().shutdown(this->get_implementation(), what, ec);
-  }
-
-protected:
-  /// Protected destructor to prevent deletion through this type.
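The error_code overloads above compose into exception-free teardown. A minimal
sketch of that pattern, not from the header itself, assuming asio 1.10.2 with
asio.hpp and <iostream> included and an already-connected socket
(graceful_close is an illustrative name, not an asio API):

    void graceful_close(asio::ip::tcp::socket& socket)
    {
      asio::error_code ec;
      asio::ip::tcp::endpoint peer = socket.remote_endpoint(ec);
      if (!ec)
        std::cout << "closing connection to " << peer << std::endl;
      socket.shutdown(asio::ip::tcp::socket::shutdown_both, ec);
      socket.close(ec); // errors during teardown are deliberately ignored
    }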
-  ~basic_socket()
-  {
-  }
-};
-
-} // namespace asio
-
-#include "asio/detail/pop_options.hpp"
-
-#endif // ASIO_BASIC_SOCKET_HPP
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_socket_acceptor.hpp b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_socket_acceptor.hpp
deleted file mode 100644
index f69f483250f27..0000000000000
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_socket_acceptor.hpp
+++ /dev/null
@@ -1,1136 +0,0 @@
-//
-// basic_socket_acceptor.hpp
-// ~~~~~~~~~~~~~~~~~~~~~~~~~
-//
-// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
-//
-// Distributed under the Boost Software License, Version 1.0. (See accompanying
-// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-//
-
-#ifndef ASIO_BASIC_SOCKET_ACCEPTOR_HPP
-#define ASIO_BASIC_SOCKET_ACCEPTOR_HPP
-
-#if defined(_MSC_VER) && (_MSC_VER >= 1200)
-# pragma once
-#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
-
-#include "asio/detail/config.hpp"
-#include "asio/basic_io_object.hpp"
-#include "asio/basic_socket.hpp"
-#include "asio/detail/handler_type_requirements.hpp"
-#include "asio/detail/throw_error.hpp"
-#include "asio/detail/type_traits.hpp"
-#include "asio/error.hpp"
-#include "asio/socket_acceptor_service.hpp"
-#include "asio/socket_base.hpp"
-
-#include "asio/detail/push_options.hpp"
-
-namespace asio {
-
-/// Provides the ability to accept new connections.
-/**
- * The basic_socket_acceptor class template is used for accepting new socket
- * connections.
- *
- * @par Thread Safety
- * @e Distinct @e objects: Safe.@n
- * @e Shared @e objects: Unsafe.
- *
- * @par Example
- * Opening a socket acceptor with the SO_REUSEADDR option enabled:
- * @code
- * asio::ip::tcp::acceptor acceptor(io_service);
- * asio::ip::tcp::endpoint endpoint(asio::ip::tcp::v4(), port);
- * acceptor.open(endpoint.protocol());
- * acceptor.set_option(asio::ip::tcp::acceptor::reuse_address(true));
- * acceptor.bind(endpoint);
- * acceptor.listen();
- * @endcode
- */
-template <typename Protocol,
-    typename SocketAcceptorService = socket_acceptor_service<Protocol> >
-class basic_socket_acceptor
-  : public basic_io_object<SocketAcceptorService>,
-    public socket_base
-{
-public:
-  /// (Deprecated: Use native_handle_type.) The native representation of an
-  /// acceptor.
-  typedef typename SocketAcceptorService::native_handle_type native_type;
-
-  /// The native representation of an acceptor.
-  typedef typename SocketAcceptorService::native_handle_type native_handle_type;
-
-  /// The protocol type.
-  typedef Protocol protocol_type;
-
-  /// The endpoint type.
-  typedef typename Protocol::endpoint endpoint_type;
-
-  /// Construct an acceptor without opening it.
-  /**
-   * This constructor creates an acceptor without opening it to listen for new
-   * connections. The open() function must be called before the acceptor can
-   * accept new socket connections.
-   *
-   * @param io_service The io_service object that the acceptor will use to
-   * dispatch handlers for any asynchronous operations performed on the
-   * acceptor.
-   */
-  explicit basic_socket_acceptor(asio::io_service& io_service)
-    : basic_io_object<SocketAcceptorService>(io_service)
-  {
-  }
-
-  /// Construct an open acceptor.
-  /**
-   * This constructor creates an acceptor and automatically opens it.
-   *
-   * @param io_service The io_service object that the acceptor will use to
-   * dispatch handlers for any asynchronous operations performed on the
-   * acceptor.
-   *
-   * @param protocol An object specifying protocol parameters to be used.
-   *
-   * @throws asio::system_error Thrown on failure.
-   */
-  basic_socket_acceptor(asio::io_service& io_service,
-      const protocol_type& protocol)
-    : basic_io_object<SocketAcceptorService>(io_service)
-  {
-    asio::error_code ec;
-    this->get_service().open(this->get_implementation(), protocol, ec);
-    asio::detail::throw_error(ec, "open");
-  }
-
-  /// Construct an acceptor opened on the given endpoint.
-  /**
-   * This constructor creates an acceptor and automatically opens it to listen
-   * for new connections on the specified endpoint.
-   *
-   * @param io_service The io_service object that the acceptor will use to
-   * dispatch handlers for any asynchronous operations performed on the
-   * acceptor.
-   *
-   * @param endpoint An endpoint on the local machine on which the acceptor
-   * will listen for new connections.
-   *
-   * @param reuse_addr Whether the constructor should set the socket option
-   * socket_base::reuse_address.
-   *
-   * @throws asio::system_error Thrown on failure.
-   *
-   * @note This constructor is equivalent to the following code:
-   * @code
-   * basic_socket_acceptor<Protocol> acceptor(io_service);
-   * acceptor.open(endpoint.protocol());
-   * if (reuse_addr)
-   *   acceptor.set_option(socket_base::reuse_address(true));
-   * acceptor.bind(endpoint);
-   * acceptor.listen(listen_backlog);
-   * @endcode
-   */
-  basic_socket_acceptor(asio::io_service& io_service,
-      const endpoint_type& endpoint, bool reuse_addr = true)
-    : basic_io_object<SocketAcceptorService>(io_service)
-  {
-    asio::error_code ec;
-    const protocol_type protocol = endpoint.protocol();
-    this->get_service().open(this->get_implementation(), protocol, ec);
-    asio::detail::throw_error(ec, "open");
-    if (reuse_addr)
-    {
-      this->get_service().set_option(this->get_implementation(),
-          socket_base::reuse_address(true), ec);
-      asio::detail::throw_error(ec, "set_option");
-    }
-    this->get_service().bind(this->get_implementation(), endpoint, ec);
-    asio::detail::throw_error(ec, "bind");
-    this->get_service().listen(this->get_implementation(),
-        socket_base::max_connections, ec);
-    asio::detail::throw_error(ec, "listen");
-  }
-
-  /// Construct a basic_socket_acceptor on an existing native acceptor.
-  /**
-   * This constructor creates an acceptor object to hold an existing native
-   * acceptor.
-   *
-   * @param io_service The io_service object that the acceptor will use to
-   * dispatch handlers for any asynchronous operations performed on the
-   * acceptor.
-   *
-   * @param protocol An object specifying protocol parameters to be used.
-   *
-   * @param native_acceptor A native acceptor.
-   *
-   * @throws asio::system_error Thrown on failure.
-   */
-  basic_socket_acceptor(asio::io_service& io_service,
-      const protocol_type& protocol, const native_handle_type& native_acceptor)
-    : basic_io_object<SocketAcceptorService>(io_service)
-  {
-    asio::error_code ec;
-    this->get_service().assign(this->get_implementation(),
-        protocol, native_acceptor, ec);
-    asio::detail::throw_error(ec, "assign");
-  }
-
-#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)
-  /// Move-construct a basic_socket_acceptor from another.
-  /**
-   * This constructor moves an acceptor from one object to another.
-   *
-   * @param other The other basic_socket_acceptor object from which the move
-   * will occur.
-   *
-   * @note Following the move, the moved-from object is in the same state as if
-   * constructed using the @c basic_socket_acceptor(io_service&) constructor.
-   */
-  basic_socket_acceptor(basic_socket_acceptor&& other)
-    : basic_io_object<SocketAcceptorService>(
-        ASIO_MOVE_CAST(basic_socket_acceptor)(other))
-  {
-  }
-
-  /// Move-assign a basic_socket_acceptor from another.
-  /**
-   * This assignment operator moves an acceptor from one object to another.
-   *
-   * @param other The other basic_socket_acceptor object from which the move
-   * will occur.
-   *
-   * @note Following the move, the moved-from object is in the same state as if
-   * constructed using the @c basic_socket_acceptor(io_service&) constructor.
-   */
-  basic_socket_acceptor& operator=(basic_socket_acceptor&& other)
-  {
-    basic_io_object<SocketAcceptorService>::operator=(
-        ASIO_MOVE_CAST(basic_socket_acceptor)(other));
-    return *this;
-  }
-
-  // All socket acceptors have access to each other's implementations.
-  template <typename Protocol1, typename SocketAcceptorService1>
-  friend class basic_socket_acceptor;
-
-  /// Move-construct a basic_socket_acceptor from an acceptor of another
-  /// protocol type.
-  /**
-   * This constructor moves an acceptor from one object to another.
-   *
-   * @param other The other basic_socket_acceptor object from which the move
-   * will occur.
-   *
-   * @note Following the move, the moved-from object is in the same state as if
-   * constructed using the @c basic_socket(io_service&) constructor.
-   */
-  template <typename Protocol1, typename SocketAcceptorService1>
-  basic_socket_acceptor(
-      basic_socket_acceptor<Protocol1, SocketAcceptorService1>&& other,
-      typename enable_if<is_convertible<Protocol1, Protocol>::value>::type* = 0)
-    : basic_io_object<SocketAcceptorService>(other.get_io_service())
-  {
-    this->get_service().template converting_move_construct<Protocol1>(
-        this->get_implementation(), other.get_implementation());
-  }
-
-  /// Move-assign a basic_socket_acceptor from an acceptor of another protocol
-  /// type.
-  /**
-   * This assignment operator moves an acceptor from one object to another.
-   *
-   * @param other The other basic_socket_acceptor object from which the move
-   * will occur.
-   *
-   * @note Following the move, the moved-from object is in the same state as if
-   * constructed using the @c basic_socket(io_service&) constructor.
-   */
-  template <typename Protocol1, typename SocketAcceptorService1>
-  typename enable_if<is_convertible<Protocol1, Protocol>::value,
-      basic_socket_acceptor>::type& operator=(
-        basic_socket_acceptor<Protocol1, SocketAcceptorService1>&& other)
-  {
-    basic_socket_acceptor tmp(ASIO_MOVE_CAST2(basic_socket_acceptor<
-        Protocol1, SocketAcceptorService1>)(other));
-    basic_io_object<SocketAcceptorService>::operator=(
-        ASIO_MOVE_CAST(basic_socket_acceptor)(tmp));
-    return *this;
-  }
-#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)
-
-  /// Open the acceptor using the specified protocol.
-  /**
-   * This function opens the socket acceptor so that it will use the specified
-   * protocol.
-   *
-   * @param protocol An object specifying which protocol is to be used.
-   *
-   * @throws asio::system_error Thrown on failure.
-   *
-   * @par Example
-   * @code
-   * asio::ip::tcp::acceptor acceptor(io_service);
-   * acceptor.open(asio::ip::tcp::v4());
-   * @endcode
-   */
-  void open(const protocol_type& protocol = protocol_type())
-  {
-    asio::error_code ec;
-    this->get_service().open(this->get_implementation(), protocol, ec);
-    asio::detail::throw_error(ec, "open");
-  }
-
-  /// Open the acceptor using the specified protocol.
-  /**
-   * This function opens the socket acceptor so that it will use the specified
-   * protocol.
-   *
-   * @param protocol An object specifying which protocol is to be used.
-   *
-   * @param ec Set to indicate what error occurred, if any.
-   *
-   * @par Example
-   * @code
-   * asio::ip::tcp::acceptor acceptor(io_service);
-   * asio::error_code ec;
-   * acceptor.open(asio::ip::tcp::v4(), ec);
-   * if (ec)
-   * {
-   *   // An error occurred.
-   * }
-   * @endcode
-   */
-  asio::error_code open(const protocol_type& protocol,
-      asio::error_code& ec)
-  {
-    return this->get_service().open(this->get_implementation(), protocol, ec);
-  }
-
-  /// Assigns an existing native acceptor to the acceptor.
-  /**
-   * This function opens the acceptor to hold an existing native acceptor.
-   *
-   * @param protocol An object specifying which protocol is to be used.
-   *
-   * @param native_acceptor A native acceptor.
-   *
-   * @throws asio::system_error Thrown on failure.
-   */
-  void assign(const protocol_type& protocol,
-      const native_handle_type& native_acceptor)
-  {
-    asio::error_code ec;
-    this->get_service().assign(this->get_implementation(),
-        protocol, native_acceptor, ec);
-    asio::detail::throw_error(ec, "assign");
-  }
-
-  /// Assigns an existing native acceptor to the acceptor.
-  /**
-   * This function opens the acceptor to hold an existing native acceptor.
-   *
-   * @param protocol An object specifying which protocol is to be used.
-   *
-   * @param native_acceptor A native acceptor.
-   *
-   * @param ec Set to indicate what error occurred, if any.
-   */
-  asio::error_code assign(const protocol_type& protocol,
-      const native_handle_type& native_acceptor, asio::error_code& ec)
-  {
-    return this->get_service().assign(this->get_implementation(),
-        protocol, native_acceptor, ec);
-  }
-
-  /// Determine whether the acceptor is open.
-  bool is_open() const
-  {
-    return this->get_service().is_open(this->get_implementation());
-  }
-
-  /// Bind the acceptor to the given local endpoint.
-  /**
-   * This function binds the socket acceptor to the specified endpoint on the
-   * local machine.
-   *
-   * @param endpoint An endpoint on the local machine to which the socket
-   * acceptor will be bound.
-   *
-   * @throws asio::system_error Thrown on failure.
-   *
-   * @par Example
-   * @code
-   * asio::ip::tcp::acceptor acceptor(io_service);
-   * asio::ip::tcp::endpoint endpoint(asio::ip::tcp::v4(), 12345);
-   * acceptor.open(endpoint.protocol());
-   * acceptor.bind(endpoint);
-   * @endcode
-   */
-  void bind(const endpoint_type& endpoint)
-  {
-    asio::error_code ec;
-    this->get_service().bind(this->get_implementation(), endpoint, ec);
-    asio::detail::throw_error(ec, "bind");
-  }
-
-  /// Bind the acceptor to the given local endpoint.
-  /**
-   * This function binds the socket acceptor to the specified endpoint on the
-   * local machine.
-   *
-   * @param endpoint An endpoint on the local machine to which the socket
-   * acceptor will be bound.
-   *
-   * @param ec Set to indicate what error occurred, if any.
-   *
-   * @par Example
-   * @code
-   * asio::ip::tcp::acceptor acceptor(io_service);
-   * asio::ip::tcp::endpoint endpoint(asio::ip::tcp::v4(), 12345);
-   * acceptor.open(endpoint.protocol());
-   * asio::error_code ec;
-   * acceptor.bind(endpoint, ec);
-   * if (ec)
-   * {
-   *   // An error occurred.
-   * }
-   * @endcode
-   */
-  asio::error_code bind(const endpoint_type& endpoint,
-      asio::error_code& ec)
-  {
-    return this->get_service().bind(this->get_implementation(), endpoint, ec);
-  }
-
-  /// Place the acceptor into the state where it will listen for new
-  /// connections.
-  /**
-   * This function puts the socket acceptor into the state where it may accept
-   * new connections.
-   *
-   * @param backlog The maximum length of the queue of pending connections.
-   *
-   * @throws asio::system_error Thrown on failure.
-   */
-  void listen(int backlog = socket_base::max_connections)
-  {
-    asio::error_code ec;
-    this->get_service().listen(this->get_implementation(), backlog, ec);
-    asio::detail::throw_error(ec, "listen");
-  }
-
-  /// Place the acceptor into the state where it will listen for new
-  /// connections.
-  /**
-   * This function puts the socket acceptor into the state where it may accept
-   * new connections.
-   *
-   * @param backlog The maximum length of the queue of pending connections.
-   *
-   * @param ec Set to indicate what error occurred, if any.
-   *
-   * @par Example
-   * @code
-   * asio::ip::tcp::acceptor acceptor(io_service);
-   * ...
-   * asio::error_code ec;
-   * acceptor.listen(asio::socket_base::max_connections, ec);
-   * if (ec)
-   * {
-   *   // An error occurred.
-   * }
-   * @endcode
-   */
-  asio::error_code listen(int backlog, asio::error_code& ec)
-  {
-    return this->get_service().listen(this->get_implementation(), backlog, ec);
-  }
-
-  /// Close the acceptor.
-  /**
-   * This function is used to close the acceptor. Any asynchronous accept
-   * operations will be cancelled immediately.
-   *
-   * A subsequent call to open() is required before the acceptor can again be
-   * used to perform socket accept operations.
-   *
-   * @throws asio::system_error Thrown on failure.
-   */
-  void close()
-  {
-    asio::error_code ec;
-    this->get_service().close(this->get_implementation(), ec);
-    asio::detail::throw_error(ec, "close");
-  }
-
-  /// Close the acceptor.
-  /**
-   * This function is used to close the acceptor. Any asynchronous accept
-   * operations will be cancelled immediately.
-   *
-   * A subsequent call to open() is required before the acceptor can again be
-   * used to perform socket accept operations.
-   *
-   * @param ec Set to indicate what error occurred, if any.
-   *
-   * @par Example
-   * @code
-   * asio::ip::tcp::acceptor acceptor(io_service);
-   * ...
-   * asio::error_code ec;
-   * acceptor.close(ec);
-   * if (ec)
-   * {
-   *   // An error occurred.
-   * }
-   * @endcode
-   */
-  asio::error_code close(asio::error_code& ec)
-  {
-    return this->get_service().close(this->get_implementation(), ec);
-  }
-
-  /// (Deprecated: Use native_handle().) Get the native acceptor representation.
-  /**
-   * This function may be used to obtain the underlying representation of the
-   * acceptor. This is intended to allow access to native acceptor functionality
-   * that is not otherwise provided.
-   */
-  native_type native()
-  {
-    return this->get_service().native_handle(this->get_implementation());
-  }
-
-  /// Get the native acceptor representation.
-  /**
-   * This function may be used to obtain the underlying representation of the
-   * acceptor. This is intended to allow access to native acceptor functionality
-   * that is not otherwise provided.
-   */
-  native_handle_type native_handle()
-  {
-    return this->get_service().native_handle(this->get_implementation());
-  }
-
-  /// Cancel all asynchronous operations associated with the acceptor.
-  /**
-   * This function causes all outstanding asynchronous connect, send and receive
-   * operations to finish immediately, and the handlers for cancelled operations
-   * will be passed the asio::error::operation_aborted error.
-   *
-   * @throws asio::system_error Thrown on failure.
-   */
-  void cancel()
-  {
-    asio::error_code ec;
-    this->get_service().cancel(this->get_implementation(), ec);
-    asio::detail::throw_error(ec, "cancel");
-  }
-
-  /// Cancel all asynchronous operations associated with the acceptor.
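Taken together, open(), set_option(), bind() and listen() reproduce what the
endpoint-taking constructor does in one step. A short sketch of the same
sequence using the error_code overloads, not from the header itself, assuming
asio 1.10.2 headers and a hypothetical port 8020:

    asio::io_service io_service;
    asio::ip::tcp::acceptor acceptor(io_service);
    asio::ip::tcp::endpoint endpoint(asio::ip::tcp::v4(), 8020);
    asio::error_code ec;
    acceptor.open(endpoint.protocol(), ec);
    if (!ec)
      acceptor.set_option(asio::ip::tcp::acceptor::reuse_address(true), ec);
    if (!ec)
      acceptor.bind(endpoint, ec);
    if (!ec)
      acceptor.listen(asio::socket_base::max_connections, ec);
    if (ec)
    {
      // Report the failure; close() the acceptor if it was opened.
    }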
-  /**
-   * This function causes all outstanding asynchronous connect, send and receive
-   * operations to finish immediately, and the handlers for cancelled operations
-   * will be passed the asio::error::operation_aborted error.
-   *
-   * @param ec Set to indicate what error occurred, if any.
-   */
-  asio::error_code cancel(asio::error_code& ec)
-  {
-    return this->get_service().cancel(this->get_implementation(), ec);
-  }
-
-  /// Set an option on the acceptor.
-  /**
-   * This function is used to set an option on the acceptor.
-   *
-   * @param option The new option value to be set on the acceptor.
-   *
-   * @throws asio::system_error Thrown on failure.
-   *
-   * @sa SettableSocketOption @n
-   * asio::socket_base::reuse_address
-   * asio::socket_base::enable_connection_aborted
-   *
-   * @par Example
-   * Setting the SOL_SOCKET/SO_REUSEADDR option:
-   * @code
-   * asio::ip::tcp::acceptor acceptor(io_service);
-   * ...
-   * asio::ip::tcp::acceptor::reuse_address option(true);
-   * acceptor.set_option(option);
-   * @endcode
-   */
-  template <typename SettableSocketOption>
-  void set_option(const SettableSocketOption& option)
-  {
-    asio::error_code ec;
-    this->get_service().set_option(this->get_implementation(), option, ec);
-    asio::detail::throw_error(ec, "set_option");
-  }
-
-  /// Set an option on the acceptor.
-  /**
-   * This function is used to set an option on the acceptor.
-   *
-   * @param option The new option value to be set on the acceptor.
-   *
-   * @param ec Set to indicate what error occurred, if any.
-   *
-   * @sa SettableSocketOption @n
-   * asio::socket_base::reuse_address
-   * asio::socket_base::enable_connection_aborted
-   *
-   * @par Example
-   * Setting the SOL_SOCKET/SO_REUSEADDR option:
-   * @code
-   * asio::ip::tcp::acceptor acceptor(io_service);
-   * ...
-   * asio::ip::tcp::acceptor::reuse_address option(true);
-   * asio::error_code ec;
-   * acceptor.set_option(option, ec);
-   * if (ec)
-   * {
-   *   // An error occurred.
-   * }
-   * @endcode
-   */
-  template <typename SettableSocketOption>
-  asio::error_code set_option(const SettableSocketOption& option,
-      asio::error_code& ec)
-  {
-    return this->get_service().set_option(
-        this->get_implementation(), option, ec);
-  }
-
-  /// Get an option from the acceptor.
-  /**
-   * This function is used to get the current value of an option on the
-   * acceptor.
-   *
-   * @param option The option value to be obtained from the acceptor.
-   *
-   * @throws asio::system_error Thrown on failure.
-   *
-   * @sa GettableSocketOption @n
-   * asio::socket_base::reuse_address
-   *
-   * @par Example
-   * Getting the value of the SOL_SOCKET/SO_REUSEADDR option:
-   * @code
-   * asio::ip::tcp::acceptor acceptor(io_service);
-   * ...
-   * asio::ip::tcp::acceptor::reuse_address option;
-   * acceptor.get_option(option);
-   * bool is_set = option.get();
-   * @endcode
-   */
-  template <typename GettableSocketOption>
-  void get_option(GettableSocketOption& option)
-  {
-    asio::error_code ec;
-    this->get_service().get_option(this->get_implementation(), option, ec);
-    asio::detail::throw_error(ec, "get_option");
-  }
-
-  /// Get an option from the acceptor.
-  /**
-   * This function is used to get the current value of an option on the
-   * acceptor.
-   *
-   * @param option The option value to be obtained from the acceptor.
-   *
-   * @param ec Set to indicate what error occurred, if any.
-   *
-   * @sa GettableSocketOption @n
-   * asio::socket_base::reuse_address
-   *
-   * @par Example
-   * Getting the value of the SOL_SOCKET/SO_REUSEADDR option:
-   * @code
-   * asio::ip::tcp::acceptor acceptor(io_service);
-   * ...
-   * asio::ip::tcp::acceptor::reuse_address option;
-   * asio::error_code ec;
-   * acceptor.get_option(option, ec);
-   * if (ec)
-   * {
-   *   // An error occurred.
-   * }
-   * bool is_set = option.get();
-   * @endcode
-   */
-  template <typename GettableSocketOption>
-  asio::error_code get_option(GettableSocketOption& option,
-      asio::error_code& ec)
-  {
-    return this->get_service().get_option(
-        this->get_implementation(), option, ec);
-  }
-
-  /// Perform an IO control command on the acceptor.
-  /**
-   * This function is used to execute an IO control command on the acceptor.
-   *
-   * @param command The IO control command to be performed on the acceptor.
-   *
-   * @throws asio::system_error Thrown on failure.
-   *
-   * @sa IoControlCommand @n
-   * asio::socket_base::non_blocking_io
-   *
-   * @par Example
-   * Getting the number of bytes ready to read:
-   * @code
-   * asio::ip::tcp::acceptor acceptor(io_service);
-   * ...
-   * asio::ip::tcp::acceptor::non_blocking_io command(true);
-   * acceptor.io_control(command);
-   * @endcode
-   */
-  template <typename IoControlCommand>
-  void io_control(IoControlCommand& command)
-  {
-    asio::error_code ec;
-    this->get_service().io_control(this->get_implementation(), command, ec);
-    asio::detail::throw_error(ec, "io_control");
-  }
-
-  /// Perform an IO control command on the acceptor.
-  /**
-   * This function is used to execute an IO control command on the acceptor.
-   *
-   * @param command The IO control command to be performed on the acceptor.
-   *
-   * @param ec Set to indicate what error occurred, if any.
-   *
-   * @sa IoControlCommand @n
-   * asio::socket_base::non_blocking_io
-   *
-   * @par Example
-   * Getting the number of bytes ready to read:
-   * @code
-   * asio::ip::tcp::acceptor acceptor(io_service);
-   * ...
-   * asio::ip::tcp::acceptor::non_blocking_io command(true);
-   * asio::error_code ec;
-   * acceptor.io_control(command, ec);
-   * if (ec)
-   * {
-   *   // An error occurred.
-   * }
-   * @endcode
-   */
-  template <typename IoControlCommand>
-  asio::error_code io_control(IoControlCommand& command,
-      asio::error_code& ec)
-  {
-    return this->get_service().io_control(
-        this->get_implementation(), command, ec);
-  }
-
-  /// Gets the non-blocking mode of the acceptor.
-  /**
-   * @returns @c true if the acceptor's synchronous operations will fail with
-   * asio::error::would_block if they are unable to perform the requested
-   * operation immediately. If @c false, synchronous operations will block
-   * until complete.
-   *
-   * @note The non-blocking mode has no effect on the behaviour of asynchronous
-   * operations. Asynchronous operations will never fail with the error
-   * asio::error::would_block.
-   */
-  bool non_blocking() const
-  {
-    return this->get_service().non_blocking(this->get_implementation());
-  }
-
-  /// Sets the non-blocking mode of the acceptor.
-  /**
-   * @param mode If @c true, the acceptor's synchronous operations will fail
-   * with asio::error::would_block if they are unable to perform the
-   * requested operation immediately. If @c false, synchronous operations will
-   * block until complete.
-   *
-   * @throws asio::system_error Thrown on failure.
-   *
-   * @note The non-blocking mode has no effect on the behaviour of asynchronous
-   * operations. Asynchronous operations will never fail with the error
-   * asio::error::would_block.
-   */
-  void non_blocking(bool mode)
-  {
-    asio::error_code ec;
-    this->get_service().non_blocking(this->get_implementation(), mode, ec);
-    asio::detail::throw_error(ec, "non_blocking");
-  }
-
-  /// Sets the non-blocking mode of the acceptor.
-  /**
-   * @param mode If @c true, the acceptor's synchronous operations will fail
-   * with asio::error::would_block if they are unable to perform the
-   * requested operation immediately. If @c false, synchronous operations will
-   * block until complete.
-   *
-   * @param ec Set to indicate what error occurred, if any.
-   *
-   * @note The non-blocking mode has no effect on the behaviour of asynchronous
-   * operations. Asynchronous operations will never fail with the error
-   * asio::error::would_block.
-   */
-  asio::error_code non_blocking(
-      bool mode, asio::error_code& ec)
-  {
-    return this->get_service().non_blocking(
-        this->get_implementation(), mode, ec);
-  }
-
-  /// Gets the non-blocking mode of the native acceptor implementation.
-  /**
-   * This function is used to retrieve the non-blocking mode of the underlying
-   * native acceptor. This mode has no effect on the behaviour of the acceptor
-   * object's synchronous operations.
-   *
-   * @returns @c true if the underlying acceptor is in non-blocking mode and
-   * direct system calls may fail with asio::error::would_block (or the
-   * equivalent system error).
-   *
-   * @note The current non-blocking mode is cached by the acceptor object.
-   * Consequently, the return value may be incorrect if the non-blocking mode
-   * was set directly on the native acceptor.
-   */
-  bool native_non_blocking() const
-  {
-    return this->get_service().native_non_blocking(this->get_implementation());
-  }
-
-  /// Sets the non-blocking mode of the native acceptor implementation.
-  /**
-   * This function is used to modify the non-blocking mode of the underlying
-   * native acceptor. It has no effect on the behaviour of the acceptor object's
-   * synchronous operations.
-   *
-   * @param mode If @c true, the underlying acceptor is put into non-blocking
-   * mode and direct system calls may fail with asio::error::would_block
-   * (or the equivalent system error).
-   *
-   * @throws asio::system_error Thrown on failure. If the @c mode is
-   * @c false, but the current value of @c non_blocking() is @c true, this
-   * function fails with asio::error::invalid_argument, as the
-   * combination does not make sense.
-   */
-  void native_non_blocking(bool mode)
-  {
-    asio::error_code ec;
-    this->get_service().native_non_blocking(
-        this->get_implementation(), mode, ec);
-    asio::detail::throw_error(ec, "native_non_blocking");
-  }
-
-  /// Sets the non-blocking mode of the native acceptor implementation.
-  /**
-   * This function is used to modify the non-blocking mode of the underlying
-   * native acceptor. It has no effect on the behaviour of the acceptor object's
-   * synchronous operations.
-   *
-   * @param mode If @c true, the underlying acceptor is put into non-blocking
-   * mode and direct system calls may fail with asio::error::would_block
-   * (or the equivalent system error).
-   *
-   * @param ec Set to indicate what error occurred, if any. If the @c mode is
-   * @c false, but the current value of @c non_blocking() is @c true, this
-   * function fails with asio::error::invalid_argument, as the
-   * combination does not make sense.
-   */
-  asio::error_code native_non_blocking(
-      bool mode, asio::error_code& ec)
-  {
-    return this->get_service().native_non_blocking(
-        this->get_implementation(), mode, ec);
-  }
-
-  /// Get the local endpoint of the acceptor.
-  /**
-   * This function is used to obtain the locally bound endpoint of the acceptor.
-   *
-   * @returns An object that represents the local endpoint of the acceptor.
-   *
-   * @throws asio::system_error Thrown on failure.
-   *
-   * @par Example
-   * @code
-   * asio::ip::tcp::acceptor acceptor(io_service);
-   * ...
-   * asio::ip::tcp::endpoint endpoint = acceptor.local_endpoint();
-   * @endcode
-   */
-  endpoint_type local_endpoint() const
-  {
-    asio::error_code ec;
-    endpoint_type ep = this->get_service().local_endpoint(
-        this->get_implementation(), ec);
-    asio::detail::throw_error(ec, "local_endpoint");
-    return ep;
-  }
-
-  /// Get the local endpoint of the acceptor.
-  /**
-   * This function is used to obtain the locally bound endpoint of the acceptor.
-   *
-   * @param ec Set to indicate what error occurred, if any.
-   *
-   * @returns An object that represents the local endpoint of the acceptor.
-   * Returns a default-constructed endpoint object if an error occurred and the
-   * error handler did not throw an exception.
-   *
-   * @par Example
-   * @code
-   * asio::ip::tcp::acceptor acceptor(io_service);
-   * ...
-   * asio::error_code ec;
-   * asio::ip::tcp::endpoint endpoint = acceptor.local_endpoint(ec);
-   * if (ec)
-   * {
-   *   // An error occurred.
-   * }
-   * @endcode
-   */
-  endpoint_type local_endpoint(asio::error_code& ec) const
-  {
-    return this->get_service().local_endpoint(this->get_implementation(), ec);
-  }
-
-  /// Accept a new connection.
-  /**
-   * This function is used to accept a new connection from a peer into the
-   * given socket. The function call will block until a new connection has been
-   * accepted successfully or an error occurs.
-   *
-   * @param peer The socket into which the new connection will be accepted.
-   *
-   * @throws asio::system_error Thrown on failure.
-   *
-   * @par Example
-   * @code
-   * asio::ip::tcp::acceptor acceptor(io_service);
-   * ...
-   * asio::ip::tcp::socket socket(io_service);
-   * acceptor.accept(socket);
-   * @endcode
-   */
-  template <typename Protocol1, typename SocketService>
-  void accept(basic_socket<Protocol1, SocketService>& peer,
-      typename enable_if<is_convertible<Protocol, Protocol1>::value>::type* = 0)
-  {
-    asio::error_code ec;
-    this->get_service().accept(this->get_implementation(),
-        peer, static_cast<endpoint_type*>(0), ec);
-    asio::detail::throw_error(ec, "accept");
-  }
-
-  /// Accept a new connection.
-  /**
-   * This function is used to accept a new connection from a peer into the
-   * given socket. The function call will block until a new connection has been
-   * accepted successfully or an error occurs.
-   *
-   * @param peer The socket into which the new connection will be accepted.
-   *
-   * @param ec Set to indicate what error occurred, if any.
-   *
-   * @par Example
-   * @code
-   * asio::ip::tcp::acceptor acceptor(io_service);
-   * ...
-   * asio::ip::tcp::socket socket(io_service);
-   * asio::error_code ec;
-   * acceptor.accept(socket, ec);
-   * if (ec)
-   * {
-   *   // An error occurred.
-   * }
-   * @endcode
-   */
-  template <typename Protocol1, typename SocketService>
-  asio::error_code accept(
-      basic_socket<Protocol1, SocketService>& peer,
-      asio::error_code& ec,
-      typename enable_if<is_convertible<Protocol, Protocol1>::value>::type* = 0)
-  {
-    return this->get_service().accept(this->get_implementation(),
-        peer, static_cast<endpoint_type*>(0), ec);
-  }
-
-  /// Start an asynchronous accept.
-  /**
-   * This function is used to asynchronously accept a new connection into a
-   * socket. The function call always returns immediately.
-   *
-   * @param peer The socket into which the new connection will be accepted.
-   * Ownership of the peer object is retained by the caller, which must
-   * guarantee that it is valid until the handler is called.
-   *
-   * @param handler The handler to be called when the accept operation
-   * completes. Copies will be made of the handler as required. The function
-   * signature of the handler must be:
-   * @code void handler(
-   *   const asio::error_code& error // Result of operation.
-   * ); @endcode
-   * Regardless of whether the asynchronous operation completes immediately or
-   * not, the handler will not be invoked from within this function. Invocation
-   * of the handler will be performed in a manner equivalent to using
-   * asio::io_service::post().
-   *
-   * @par Example
-   * @code
-   * void accept_handler(const asio::error_code& error)
-   * {
-   *   if (!error)
-   *   {
-   *     // Accept succeeded.
-   *   }
-   * }
-   *
-   * ...
-   *
-   * asio::ip::tcp::acceptor acceptor(io_service);
-   * ...
-   * asio::ip::tcp::socket socket(io_service);
-   * acceptor.async_accept(socket, accept_handler);
-   * @endcode
-   */
-  template <typename Protocol1, typename SocketService, typename AcceptHandler>
-  ASIO_INITFN_RESULT_TYPE(AcceptHandler,
-      void (asio::error_code))
-  async_accept(basic_socket<Protocol1, SocketService>& peer,
-      ASIO_MOVE_ARG(AcceptHandler) handler,
-      typename enable_if<is_convertible<Protocol, Protocol1>::value>::type* = 0)
-  {
-    // If you get an error on the following line it means that your handler does
-    // not meet the documented type requirements for an AcceptHandler.
-    ASIO_ACCEPT_HANDLER_CHECK(AcceptHandler, handler) type_check;
-
-    return this->get_service().async_accept(this->get_implementation(),
-        peer, static_cast<endpoint_type*>(0),
-        ASIO_MOVE_CAST(AcceptHandler)(handler));
-  }
-
-  /// Accept a new connection and obtain the endpoint of the peer
-  /**
-   * This function is used to accept a new connection from a peer into the
-   * given socket, and additionally provide the endpoint of the remote peer.
-   * The function call will block until a new connection has been accepted
-   * successfully or an error occurs.
-   *
-   * @param peer The socket into which the new connection will be accepted.
-   *
-   * @param peer_endpoint An endpoint object which will receive the endpoint of
-   * the remote peer.
-   *
-   * @throws asio::system_error Thrown on failure.
-   *
-   * @par Example
-   * @code
-   * asio::ip::tcp::acceptor acceptor(io_service);
-   * ...
-   * asio::ip::tcp::socket socket(io_service);
-   * asio::ip::tcp::endpoint endpoint;
-   * acceptor.accept(socket, endpoint);
-   * @endcode
-   */
-  template <typename SocketService>
-  void accept(basic_socket<protocol_type, SocketService>& peer,
-      endpoint_type& peer_endpoint)
-  {
-    asio::error_code ec;
-    this->get_service().accept(this->get_implementation(),
-        peer, &peer_endpoint, ec);
-    asio::detail::throw_error(ec, "accept");
-  }
-
-  /// Accept a new connection and obtain the endpoint of the peer
-  /**
-   * This function is used to accept a new connection from a peer into the
-   * given socket, and additionally provide the endpoint of the remote peer.
-   * The function call will block until a new connection has been accepted
-   * successfully or an error occurs.
-   *
-   * @param peer The socket into which the new connection will be accepted.
-   *
-   * @param peer_endpoint An endpoint object which will receive the endpoint of
-   * the remote peer.
-   *
-   * @param ec Set to indicate what error occurred, if any.
-   *
-   * @par Example
-   * @code
-   * asio::ip::tcp::acceptor acceptor(io_service);
-   * ...
-   * asio::ip::tcp::socket socket(io_service);
-   * asio::ip::tcp::endpoint endpoint;
-   * asio::error_code ec;
-   * acceptor.accept(socket, endpoint, ec);
-   * if (ec)
-   * {
-   *   // An error occurred.
-   * }
-   * @endcode
-   */
-  template <typename SocketService>
-  asio::error_code accept(
-      basic_socket<protocol_type, SocketService>& peer,
-      endpoint_type& peer_endpoint, asio::error_code& ec)
-  {
-    return this->get_service().accept(
-        this->get_implementation(), peer, &peer_endpoint, ec);
-  }
-
-  /// Start an asynchronous accept.
-  /**
-   * This function is used to asynchronously accept a new connection into a
-   * socket, and additionally obtain the endpoint of the remote peer. The
-   * function call always returns immediately.
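The blocking accept() overloads above are typically driven in a loop. A
compact sketch, not from the header itself, continuing the acceptor set up in
the previous example (the processing step is left abstract):

    for (;;)
    {
      asio::ip::tcp::socket socket(io_service);
      asio::ip::tcp::endpoint peer;
      asio::error_code ec;
      acceptor.accept(socket, peer, ec);
      if (ec)
        break; // e.g. asio::error::operation_aborted after close()
      // Hand the connected socket and peer endpoint off for processing.
    }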
-   *
-   * @param peer The socket into which the new connection will be accepted.
-   * Ownership of the peer object is retained by the caller, which must
-   * guarantee that it is valid until the handler is called.
-   *
-   * @param peer_endpoint An endpoint object into which the endpoint of the
-   * remote peer will be written. Ownership of the peer_endpoint object is
-   * retained by the caller, which must guarantee that it is valid until the
-   * handler is called.
-   *
-   * @param handler The handler to be called when the accept operation
-   * completes. Copies will be made of the handler as required. The function
-   * signature of the handler must be:
-   * @code void handler(
-   *   const asio::error_code& error // Result of operation.
-   * ); @endcode
-   * Regardless of whether the asynchronous operation completes immediately or
-   * not, the handler will not be invoked from within this function. Invocation
-   * of the handler will be performed in a manner equivalent to using
-   * asio::io_service::post().
-   */
-  template <typename SocketService, typename AcceptHandler>
-  ASIO_INITFN_RESULT_TYPE(AcceptHandler,
-      void (asio::error_code))
-  async_accept(basic_socket<protocol_type, SocketService>& peer,
-      endpoint_type& peer_endpoint, ASIO_MOVE_ARG(AcceptHandler) handler)
-  {
-    // If you get an error on the following line it means that your handler does
-    // not meet the documented type requirements for an AcceptHandler.
-    ASIO_ACCEPT_HANDLER_CHECK(AcceptHandler, handler) type_check;
-
-    return this->get_service().async_accept(this->get_implementation(), peer,
-        &peer_endpoint, ASIO_MOVE_CAST(AcceptHandler)(handler));
-  }
-};
-
-} // namespace asio
-
-#include "asio/detail/pop_options.hpp"
-
-#endif // ASIO_BASIC_SOCKET_ACCEPTOR_HPP
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_socket_iostream.hpp b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_socket_iostream.hpp
deleted file mode 100644
index 81754022131f6..0000000000000
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/basic_socket_iostream.hpp
+++ /dev/null
@@ -1,286 +0,0 @@
-//
-// basic_socket_iostream.hpp
-// ~~~~~~~~~~~~~~~~~~~~~~~~~
-//
-// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
-//
-// Distributed under the Boost Software License, Version 1.0. (See accompanying
-// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-//
-
-#ifndef ASIO_BASIC_SOCKET_IOSTREAM_HPP
-#define ASIO_BASIC_SOCKET_IOSTREAM_HPP
-
-#if defined(_MSC_VER) && (_MSC_VER >= 1200)
-# pragma once
-#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
-
-#include "asio/detail/config.hpp"
-
-#if !defined(ASIO_NO_IOSTREAM)
-
-#include <istream>
-#include <ostream>
-#include "asio/basic_socket_streambuf.hpp"
-#include "asio/stream_socket_service.hpp"
-
-#if !defined(ASIO_HAS_VARIADIC_TEMPLATES)
-
-# include "asio/detail/variadic_templates.hpp"
-
-// A macro that should expand to:
-//   template <typename T1, ..., typename Tn>
-//   explicit basic_socket_iostream(T1 x1, ..., Tn xn)
-//     : std::basic_iostream<char>(
-//         &this->detail::socket_iostream_base<
-//           Protocol, StreamSocketService, Time,
-//           TimeTraits, TimerService>::streambuf_)
-//   {
-//     if (rdbuf()->connect(x1, ..., xn) == 0)
-//       this->setstate(std::ios_base::failbit);
-//   }
-// This macro should only persist within this file.
-
-# define ASIO_PRIVATE_CTR_DEF(n) \
-  template <ASIO_VARIADIC_TPARAMS(n)> \
-  explicit basic_socket_iostream(ASIO_VARIADIC_PARAMS(n)) \
-    : std::basic_iostream<char>( \
-        &this->detail::socket_iostream_base< \
-          Protocol, StreamSocketService, Time, \
-          TimeTraits, TimerService>::streambuf_) \
-  { \
-    this->setf(std::ios_base::unitbuf); \
-    if (rdbuf()->connect(ASIO_VARIADIC_ARGS(n)) == 0) \
-      this->setstate(std::ios_base::failbit); \
-  } \
-  /**/
-
-// A macro that should expand to:
-//   template <typename T1, ..., typename Tn>
-//   void connect(T1 x1, ..., Tn xn)
-//   {
-//     if (rdbuf()->connect(x1, ..., xn) == 0)
-//       this->setstate(std::ios_base::failbit);
-//   }
-// This macro should only persist within this file.
-
-# define ASIO_PRIVATE_CONNECT_DEF(n) \
-  template <ASIO_VARIADIC_TPARAMS(n)> \
-  void connect(ASIO_VARIADIC_PARAMS(n)) \
-  { \
-    if (rdbuf()->connect(ASIO_VARIADIC_ARGS(n)) == 0) \
-      this->setstate(std::ios_base::failbit); \
-  } \
-  /**/
-
-#endif // !defined(ASIO_HAS_VARIADIC_TEMPLATES)
-
-#include "asio/detail/push_options.hpp"
-
-namespace asio {
-namespace detail {
-
-// A separate base class is used to ensure that the streambuf is initialised
-// prior to the basic_socket_iostream's basic_iostream base class.
-template <typename Protocol,
-    typename StreamSocketService,
-    typename Time,
-    typename TimeTraits,
-    typename TimerService>
-class socket_iostream_base
-{
-protected:
-  basic_socket_streambuf<Protocol, StreamSocketService,
-    Time, TimeTraits, TimerService> streambuf_;
-};
-
-}
-
-/// Iostream interface for a socket.
-template <typename Protocol,
-    typename StreamSocketService = stream_socket_service<Protocol>,
-#if defined(ASIO_HAS_BOOST_DATE_TIME) \
-  || defined(GENERATING_DOCUMENTATION)
-    typename Time = boost::posix_time::ptime,
-    typename TimeTraits = asio::time_traits<Time>